repo_name
string
path
string
copies
string
size
string
content
string
license
string
dlumberg/kernel_asus_tf101
drivers/s390/char/vmcp.c
1540
5196
/* * Copyright IBM Corp. 2004,2010 * Interface implementation for communication with the z/VM control program * * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> * * z/VMs CP offers the possibility to issue commands via the diagnose code 8 * this driver implements a character device that issues these commands and * returns the answer of CP. * * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS */ #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <asm/compat.h> #include <asm/cpcmd.h> #include <asm/debug.h> #include <asm/uaccess.h> #include "vmcp.h" static debug_info_t *vmcp_debug; static int vmcp_open(struct inode *inode, struct file *file) { struct vmcp_session *session; if (!capable(CAP_SYS_ADMIN)) return -EPERM; session = kmalloc(sizeof(*session), GFP_KERNEL); if (!session) return -ENOMEM; session->bufsize = PAGE_SIZE; session->response = NULL; session->resp_size = 0; mutex_init(&session->mutex); file->private_data = session; return nonseekable_open(inode, file); } static int vmcp_release(struct inode *inode, struct file *file) { struct vmcp_session *session; session = file->private_data; file->private_data = NULL; free_pages((unsigned long)session->response, get_order(session->bufsize)); kfree(session); return 0; } static ssize_t vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) { ssize_t ret; size_t size; struct vmcp_session *session; session = file->private_data; if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; if (!session->response) { mutex_unlock(&session->mutex); return 0; } size = min_t(size_t, session->resp_size, session->bufsize); ret = simple_read_from_buffer(buff, count, ppos, session->response, size); mutex_unlock(&session->mutex); return ret; } static ssize_t vmcp_write(struct file *file, const char __user *buff, size_t count, loff_t *ppos) { char *cmd; struct vmcp_session *session; if 
(count > 240) return -EINVAL; cmd = kmalloc(count + 1, GFP_KERNEL); if (!cmd) return -ENOMEM; if (copy_from_user(cmd, buff, count)) { kfree(cmd); return -EFAULT; } cmd[count] = '\0'; session = file->private_data; if (mutex_lock_interruptible(&session->mutex)) { kfree(cmd); return -ERESTARTSYS; } if (!session->response) session->response = (char *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT | GFP_DMA, get_order(session->bufsize)); if (!session->response) { mutex_unlock(&session->mutex); kfree(cmd); return -ENOMEM; } debug_text_event(vmcp_debug, 1, cmd); session->resp_size = cpcmd(cmd, session->response, session->bufsize, &session->resp_code); mutex_unlock(&session->mutex); kfree(cmd); *ppos = 0; /* reset the file pointer after a command */ return count; } /* * These ioctls are available, as the semantics of the diagnose 8 call * does not fit very well into a Linux call. Diagnose X'08' is described in * CP Programming Services SC24-6084-00 * * VMCP_GETCODE: gives the CP return code back to user space * VMCP_SETBUF: sets the response buffer for the next write call. diagnose 8 * expects adjacent pages in real storage and to make matters worse, we * dont know the size of the response. 
Therefore we default to PAGESIZE and * let userspace to change the response size, if userspace expects a bigger * response */ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct vmcp_session *session; int __user *argp; int temp; session = file->private_data; if (is_compat_task()) argp = compat_ptr(arg); else argp = (int __user *)arg; if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; switch (cmd) { case VMCP_GETCODE: temp = session->resp_code; mutex_unlock(&session->mutex); return put_user(temp, argp); case VMCP_SETBUF: free_pages((unsigned long)session->response, get_order(session->bufsize)); session->response=NULL; temp = get_user(session->bufsize, argp); if (get_order(session->bufsize) > 8) { session->bufsize = PAGE_SIZE; temp = -EINVAL; } mutex_unlock(&session->mutex); return temp; case VMCP_GETSIZE: temp = session->resp_size; mutex_unlock(&session->mutex); return put_user(temp, argp); default: mutex_unlock(&session->mutex); return -ENOIOCTLCMD; } } static const struct file_operations vmcp_fops = { .owner = THIS_MODULE, .open = vmcp_open, .release = vmcp_release, .read = vmcp_read, .write = vmcp_write, .unlocked_ioctl = vmcp_ioctl, .compat_ioctl = vmcp_ioctl, .llseek = no_llseek, }; static struct miscdevice vmcp_dev = { .name = "vmcp", .minor = MISC_DYNAMIC_MINOR, .fops = &vmcp_fops, }; static int __init vmcp_init(void) { int ret; if (!MACHINE_IS_VM) return 0; vmcp_debug = debug_register("vmcp", 1, 1, 240); if (!vmcp_debug) return -ENOMEM; ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); if (ret) { debug_unregister(vmcp_debug); return ret; } ret = misc_register(&vmcp_dev); if (ret) debug_unregister(vmcp_debug); return ret; } device_initcall(vmcp_init);
gpl-2.0
jamiethemorris/SPH-L710_Kernel
drivers/usb/gadget/u_smd.c
2820
22401
/* * u_smd.c - utilities for USB gadget serial over smd * * Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This code also borrows from drivers/usb/gadget/u_serial.c, which is * Copyright (C) 2000 - 2003 Al Borchers (alborchers@steinerpoint.com) * Copyright (C) 2008 David Brownell * Copyright (C) 2008 by Nokia Corporation * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2000 Peter Berger (pberger@brimson.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/termios.h> #include <mach/msm_smd.h> #include <linux/debugfs.h> #include "u_serial.h" #define SMD_RX_QUEUE_SIZE 8 #define SMD_RX_BUF_SIZE 2048 #define SMD_TX_QUEUE_SIZE 8 #define SMD_TX_BUF_SIZE 2048 static struct workqueue_struct *gsmd_wq; #define SMD_N_PORTS 2 #define CH_OPENED 0 #define CH_READY 1 struct smd_port_info { struct smd_channel *ch; char *name; unsigned long flags; }; struct smd_port_info smd_pi[SMD_N_PORTS] = { { .name = "DS", }, { .name = "UNUSED", }, }; struct gsmd_port { unsigned port_num; spinlock_t port_lock; unsigned n_read; struct list_head read_pool; struct list_head read_queue; struct work_struct push; struct list_head write_pool; struct work_struct pull; struct gserial *port_usb; struct smd_port_info *pi; struct delayed_work connect_work; struct work_struct disconnect_work; /* At present, smd does not notify * control bit change info from modem */ struct work_struct update_modem_ctrl_sig; #define 
SMD_ACM_CTRL_DTR 0x01 #define SMD_ACM_CTRL_RTS 0x02 unsigned cbits_to_modem; #define SMD_ACM_CTRL_DCD 0x01 #define SMD_ACM_CTRL_DSR 0x02 #define SMD_ACM_CTRL_BRK 0x04 #define SMD_ACM_CTRL_RI 0x08 unsigned cbits_to_laptop; /* pkt counters */ unsigned long nbytes_tomodem; unsigned long nbytes_tolaptop; }; static struct smd_portmaster { struct mutex lock; struct gsmd_port *port; struct platform_driver pdrv; } smd_ports[SMD_N_PORTS]; static unsigned n_smd_ports; static void gsmd_free_req(struct usb_ep *ep, struct usb_request *req) { kfree(req->buf); usb_ep_free_request(ep, req); } static void gsmd_free_requests(struct usb_ep *ep, struct list_head *head) { struct usb_request *req; while (!list_empty(head)) { req = list_entry(head->next, struct usb_request, list); list_del(&req->list); gsmd_free_req(ep, req); } } static struct usb_request * gsmd_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags) { struct usb_request *req; req = usb_ep_alloc_request(ep, flags); if (!req) { pr_err("%s: usb alloc request failed\n", __func__); return 0; } req->length = len; req->buf = kmalloc(len, flags); if (!req->buf) { pr_err("%s: request buf allocation failed\n", __func__); usb_ep_free_request(ep, req); return 0; } return req; } static int gsmd_alloc_requests(struct usb_ep *ep, struct list_head *head, int num, int size, void (*cb)(struct usb_ep *ep, struct usb_request *)) { int i; struct usb_request *req; pr_debug("%s: ep:%p head:%p num:%d size:%d cb:%p", __func__, ep, head, num, size, cb); for (i = 0; i < num; i++) { req = gsmd_alloc_req(ep, size, GFP_ATOMIC); if (!req) { pr_debug("%s: req allocated:%d\n", __func__, i); return list_empty(head) ? 
-ENOMEM : 0; } req->complete = cb; list_add(&req->list, head); } return 0; } static void gsmd_start_rx(struct gsmd_port *port) { struct list_head *pool; struct usb_ep *out; unsigned long flags; int ret; if (!port) { pr_err("%s: port is null\n", __func__); return; } spin_lock_irqsave(&port->port_lock, flags); if (!port->port_usb) { pr_debug("%s: USB disconnected\n", __func__); goto start_rx_end; } pool = &port->read_pool; out = port->port_usb->out; while (test_bit(CH_OPENED, &port->pi->flags) && !list_empty(pool)) { struct usb_request *req; req = list_entry(pool->next, struct usb_request, list); list_del(&req->list); req->length = SMD_RX_BUF_SIZE; spin_unlock_irqrestore(&port->port_lock, flags); ret = usb_ep_queue(out, req, GFP_KERNEL); spin_lock_irqsave(&port->port_lock, flags); if (ret) { pr_err("%s: usb ep out queue failed" "port:%p, port#%d\n", __func__, port, port->port_num); list_add_tail(&req->list, pool); break; } } start_rx_end: spin_unlock_irqrestore(&port->port_lock, flags); } static void gsmd_rx_push(struct work_struct *w) { struct gsmd_port *port = container_of(w, struct gsmd_port, push); struct smd_port_info *pi = port->pi; struct list_head *q; pr_debug("%s: port:%p port#%d", __func__, port, port->port_num); spin_lock_irq(&port->port_lock); q = &port->read_queue; while (pi->ch && !list_empty(q)) { struct usb_request *req; int avail; req = list_first_entry(q, struct usb_request, list); switch (req->status) { case -ESHUTDOWN: pr_debug("%s: req status shutdown portno#%d port:%p\n", __func__, port->port_num, port); goto rx_push_end; default: pr_warning("%s: port:%p port#%d" " Unexpected Rx Status:%d\n", __func__, port, port->port_num, req->status); case 0: /* normal completion */ break; } avail = smd_write_avail(pi->ch); if (!avail) goto rx_push_end; if (req->actual) { char *packet = req->buf; unsigned size = req->actual; unsigned n; int count; n = port->n_read; if (n) { packet += n; size -= n; } count = smd_write(pi->ch, packet, size); if (count < 0) { 
pr_err("%s: smd write failed err:%d\n", __func__, count); goto rx_push_end; } if (count != size) { port->n_read += count; goto rx_push_end; } port->nbytes_tomodem += count; } port->n_read = 0; list_move(&req->list, &port->read_pool); } rx_push_end: spin_unlock_irq(&port->port_lock); gsmd_start_rx(port); } static void gsmd_read_pending(struct gsmd_port *port) { int avail; if (!port || !port->pi->ch) return; /* passing null buffer discards the data */ while ((avail = smd_read_avail(port->pi->ch))) smd_read(port->pi->ch, 0, avail); return; } static void gsmd_tx_pull(struct work_struct *w) { struct gsmd_port *port = container_of(w, struct gsmd_port, pull); struct list_head *pool = &port->write_pool; struct smd_port_info *pi = port->pi; struct usb_ep *in; pr_debug("%s: port:%p port#%d pool:%p\n", __func__, port, port->port_num, pool); spin_lock_irq(&port->port_lock); if (!port->port_usb) { pr_debug("%s: usb is disconnected\n", __func__); spin_unlock_irq(&port->port_lock); gsmd_read_pending(port); return; } in = port->port_usb->in; while (pi->ch && !list_empty(pool)) { struct usb_request *req; int avail; int ret; avail = smd_read_avail(pi->ch); if (!avail) break; avail = avail > SMD_TX_BUF_SIZE ? 
SMD_TX_BUF_SIZE : avail; req = list_entry(pool->next, struct usb_request, list); list_del(&req->list); req->length = smd_read(pi->ch, req->buf, avail); spin_unlock_irq(&port->port_lock); ret = usb_ep_queue(in, req, GFP_KERNEL); spin_lock_irq(&port->port_lock); if (ret) { pr_err("%s: usb ep out queue failed" "port:%p, port#%d err:%d\n", __func__, port, port->port_num, ret); /* could be usb disconnected */ if (!port->port_usb) gsmd_free_req(in, req); else list_add(&req->list, pool); goto tx_pull_end; } port->nbytes_tolaptop += req->length; } tx_pull_end: /* TBD: Check how code behaves on USB bus suspend */ if (port->port_usb && smd_read_avail(port->pi->ch) && !list_empty(pool)) queue_work(gsmd_wq, &port->pull); spin_unlock_irq(&port->port_lock); return; } static void gsmd_read_complete(struct usb_ep *ep, struct usb_request *req) { struct gsmd_port *port = ep->driver_data; pr_debug("%s: ep:%p port:%p\n", __func__, ep, port); if (!port) { pr_err("%s: port is null\n", __func__); return; } spin_lock(&port->port_lock); if (!test_bit(CH_OPENED, &port->pi->flags) || req->status == -ESHUTDOWN) { spin_unlock(&port->port_lock); gsmd_free_req(ep, req); return; } list_add_tail(&req->list, &port->read_queue); queue_work(gsmd_wq, &port->push); spin_unlock(&port->port_lock); return; } static void gsmd_write_complete(struct usb_ep *ep, struct usb_request *req) { struct gsmd_port *port = ep->driver_data; pr_debug("%s: ep:%p port:%p\n", __func__, ep, port); if (!port) { pr_err("%s: port is null\n", __func__); return; } spin_lock(&port->port_lock); if (!test_bit(CH_OPENED, &port->pi->flags) || req->status == -ESHUTDOWN) { spin_unlock(&port->port_lock); gsmd_free_req(ep, req); return; } if (req->status) pr_warning("%s: port:%p port#%d unexpected %s status %d\n", __func__, port, port->port_num, ep->name, req->status); list_add(&req->list, &port->write_pool); queue_work(gsmd_wq, &port->pull); spin_unlock(&port->port_lock); return; } static void gsmd_start_io(struct gsmd_port *port) { int 
ret = -ENODEV; pr_debug("%s: port: %p\n", __func__, port); spin_lock(&port->port_lock); if (!port->port_usb) goto start_io_out; smd_tiocmset_from_cb(port->pi->ch, port->cbits_to_modem, ~port->cbits_to_modem); ret = gsmd_alloc_requests(port->port_usb->out, &port->read_pool, SMD_RX_QUEUE_SIZE, SMD_RX_BUF_SIZE, gsmd_read_complete); if (ret) { pr_err("%s: unable to allocate out requests\n", __func__); goto start_io_out; } ret = gsmd_alloc_requests(port->port_usb->in, &port->write_pool, SMD_TX_QUEUE_SIZE, SMD_TX_BUF_SIZE, gsmd_write_complete); if (ret) { gsmd_free_requests(port->port_usb->out, &port->read_pool); pr_err("%s: unable to allocate IN requests\n", __func__); goto start_io_out; } start_io_out: spin_unlock(&port->port_lock); if (ret) return; gsmd_start_rx(port); } static unsigned int convert_uart_sigs_to_acm(unsigned uart_sig) { unsigned int acm_sig = 0; /* should this needs to be in calling functions ??? */ uart_sig &= (TIOCM_RI | TIOCM_CD | TIOCM_DSR); if (uart_sig & TIOCM_RI) acm_sig |= SMD_ACM_CTRL_RI; if (uart_sig & TIOCM_CD) acm_sig |= SMD_ACM_CTRL_DCD; if (uart_sig & TIOCM_DSR) acm_sig |= SMD_ACM_CTRL_DSR; return acm_sig; } static unsigned int convert_acm_sigs_to_uart(unsigned acm_sig) { unsigned int uart_sig = 0; /* should this needs to be in calling functions ??? 
*/ acm_sig &= (SMD_ACM_CTRL_DTR | SMD_ACM_CTRL_RTS); if (acm_sig & SMD_ACM_CTRL_DTR) uart_sig |= TIOCM_DTR; if (acm_sig & SMD_ACM_CTRL_RTS) uart_sig |= TIOCM_RTS; return uart_sig; } static void gsmd_stop_io(struct gsmd_port *port) { struct usb_ep *in; struct usb_ep *out; unsigned long flags; spin_lock_irqsave(&port->port_lock, flags); if (!port->port_usb) { spin_unlock_irqrestore(&port->port_lock, flags); return; } in = port->port_usb->in; out = port->port_usb->out; spin_unlock_irqrestore(&port->port_lock, flags); usb_ep_fifo_flush(in); usb_ep_fifo_flush(out); spin_lock(&port->port_lock); if (port->port_usb) { gsmd_free_requests(out, &port->read_pool); gsmd_free_requests(out, &port->read_queue); gsmd_free_requests(in, &port->write_pool); port->n_read = 0; port->cbits_to_laptop = 0; } if (port->port_usb->send_modem_ctrl_bits) port->port_usb->send_modem_ctrl_bits( port->port_usb, port->cbits_to_laptop); spin_unlock(&port->port_lock); } static void gsmd_notify(void *priv, unsigned event) { struct gsmd_port *port = priv; struct smd_port_info *pi = port->pi; int i; switch (event) { case SMD_EVENT_DATA: pr_debug("%s: Event data\n", __func__); if (smd_read_avail(pi->ch)) queue_work(gsmd_wq, &port->pull); if (smd_write_avail(pi->ch)) queue_work(gsmd_wq, &port->push); break; case SMD_EVENT_OPEN: pr_debug("%s: Event Open\n", __func__); set_bit(CH_OPENED, &pi->flags); gsmd_start_io(port); break; case SMD_EVENT_CLOSE: pr_debug("%s: Event Close\n", __func__); clear_bit(CH_OPENED, &pi->flags); gsmd_stop_io(port); break; case SMD_EVENT_STATUS: i = smd_tiocmget(port->pi->ch); port->cbits_to_laptop = convert_uart_sigs_to_acm(i); if (port->port_usb && port->port_usb->send_modem_ctrl_bits) port->port_usb->send_modem_ctrl_bits(port->port_usb, port->cbits_to_laptop); break; } } static void gsmd_connect_work(struct work_struct *w) { struct gsmd_port *port; struct smd_port_info *pi; int ret; port = container_of(w, struct gsmd_port, connect_work.work); pi = port->pi; pr_debug("%s: port:%p 
port#%d\n", __func__, port, port->port_num); if (!test_bit(CH_READY, &pi->flags)) return; ret = smd_named_open_on_edge(pi->name, SMD_APPS_MODEM, &pi->ch, port, gsmd_notify); if (ret) { if (ret == -EAGAIN) { /* port not ready - retry */ pr_debug("%s: SMD port not ready - rescheduling:%s err:%d\n", __func__, pi->name, ret); queue_delayed_work(gsmd_wq, &port->connect_work, msecs_to_jiffies(250)); } else { pr_err("%s: unable to open smd port:%s err:%d\n", __func__, pi->name, ret); } } } static void gsmd_disconnect_work(struct work_struct *w) { struct gsmd_port *port; struct smd_port_info *pi; port = container_of(w, struct gsmd_port, disconnect_work); pi = port->pi; pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num); smd_close(port->pi->ch); port->pi->ch = NULL; } static void gsmd_notify_modem(void *gptr, u8 portno, int ctrl_bits) { struct gsmd_port *port; int temp; struct gserial *gser = gptr; if (portno >= n_smd_ports) { pr_err("%s: invalid portno#%d\n", __func__, portno); return; } if (!gser) { pr_err("%s: gser is null\n", __func__); return; } port = smd_ports[portno].port; temp = convert_acm_sigs_to_uart(ctrl_bits); if (temp == port->cbits_to_modem) return; port->cbits_to_modem = temp; /* usb could send control signal before smd is ready */ if (!test_bit(CH_OPENED, &port->pi->flags)) return; /* if DTR is high, update latest modem info to laptop */ if (port->cbits_to_modem & TIOCM_DTR) { unsigned i; i = smd_tiocmget(port->pi->ch); port->cbits_to_laptop = convert_uart_sigs_to_acm(i); if (gser->send_modem_ctrl_bits) gser->send_modem_ctrl_bits( port->port_usb, port->cbits_to_laptop); } smd_tiocmset(port->pi->ch, port->cbits_to_modem, ~port->cbits_to_modem); } int gsmd_connect(struct gserial *gser, u8 portno) { unsigned long flags; int ret; struct gsmd_port *port; pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno); if (portno >= n_smd_ports) { pr_err("%s: Invalid port no#%d", __func__, portno); return -EINVAL; } if (!gser) { pr_err("%s: gser is 
null\n", __func__); return -EINVAL; } port = smd_ports[portno].port; spin_lock_irqsave(&port->port_lock, flags); port->port_usb = gser; gser->notify_modem = gsmd_notify_modem; port->nbytes_tomodem = 0; port->nbytes_tolaptop = 0; spin_unlock_irqrestore(&port->port_lock, flags); ret = usb_ep_enable(gser->in); if (ret) { pr_err("%s: usb_ep_enable failed eptype:IN ep:%p", __func__, gser->in); port->port_usb = 0; return ret; } gser->in->driver_data = port; ret = usb_ep_enable(gser->out); if (ret) { pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p", __func__, gser->out); port->port_usb = 0; gser->in->driver_data = 0; return ret; } gser->out->driver_data = port; queue_delayed_work(gsmd_wq, &port->connect_work, msecs_to_jiffies(0)); return 0; } void gsmd_disconnect(struct gserial *gser, u8 portno) { unsigned long flags; struct gsmd_port *port; pr_debug("%s: gserial:%p portno:%u\n", __func__, gser, portno); if (portno >= n_smd_ports) { pr_err("%s: invalid portno#%d\n", __func__, portno); return; } if (!gser) { pr_err("%s: gser is null\n", __func__); return; } port = smd_ports[portno].port; spin_lock_irqsave(&port->port_lock, flags); port->port_usb = 0; spin_unlock_irqrestore(&port->port_lock, flags); /* disable endpoints, aborting down any active I/O */ usb_ep_disable(gser->out); gser->out->driver_data = NULL; usb_ep_disable(gser->in); gser->in->driver_data = NULL; spin_lock_irqsave(&port->port_lock, flags); gsmd_free_requests(gser->out, &port->read_pool); gsmd_free_requests(gser->out, &port->read_queue); gsmd_free_requests(gser->in, &port->write_pool); port->n_read = 0; spin_unlock_irqrestore(&port->port_lock, flags); if (test_and_clear_bit(CH_OPENED, &port->pi->flags)) { /* lower the dtr */ port->cbits_to_modem = 0; smd_tiocmset(port->pi->ch, port->cbits_to_modem, ~port->cbits_to_modem); } if (port->pi->ch) queue_work(gsmd_wq, &port->disconnect_work); } #define SMD_CH_MAX_LEN 20 static int gsmd_ch_probe(struct platform_device *pdev) { struct gsmd_port *port; struct 
smd_port_info *pi; int i; unsigned long flags; pr_debug("%s: name:%s\n", __func__, pdev->name); for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; pi = port->pi; if (!strncmp(pi->name, pdev->name, SMD_CH_MAX_LEN)) { set_bit(CH_READY, &pi->flags); spin_lock_irqsave(&port->port_lock, flags); if (port->port_usb) queue_delayed_work(gsmd_wq, &port->connect_work, msecs_to_jiffies(0)); spin_unlock_irqrestore(&port->port_lock, flags); break; } } return 0; } static int gsmd_ch_remove(struct platform_device *pdev) { struct gsmd_port *port; struct smd_port_info *pi; int i; pr_debug("%s: name:%s\n", __func__, pdev->name); for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; pi = port->pi; if (!strncmp(pi->name, pdev->name, SMD_CH_MAX_LEN)) { clear_bit(CH_READY, &pi->flags); clear_bit(CH_OPENED, &pi->flags); if (pi->ch) { smd_close(pi->ch); pi->ch = NULL; } break; } } return 0; } static void gsmd_port_free(int portno) { struct gsmd_port *port = smd_ports[portno].port; if (!port) kfree(port); } static int gsmd_port_alloc(int portno, struct usb_cdc_line_coding *coding) { struct gsmd_port *port; struct platform_driver *pdrv; port = kzalloc(sizeof(struct gsmd_port), GFP_KERNEL); if (!port) return -ENOMEM; port->port_num = portno; port->pi = &smd_pi[portno]; spin_lock_init(&port->port_lock); INIT_LIST_HEAD(&port->read_pool); INIT_LIST_HEAD(&port->read_queue); INIT_WORK(&port->push, gsmd_rx_push); INIT_LIST_HEAD(&port->write_pool); INIT_WORK(&port->pull, gsmd_tx_pull); INIT_DELAYED_WORK(&port->connect_work, gsmd_connect_work); INIT_WORK(&port->disconnect_work, gsmd_disconnect_work); smd_ports[portno].port = port; pdrv = &smd_ports[portno].pdrv; pdrv->probe = gsmd_ch_probe; pdrv->remove = gsmd_ch_remove; pdrv->driver.name = port->pi->name; pdrv->driver.owner = THIS_MODULE; platform_driver_register(pdrv); pr_debug("%s: port:%p portno:%d\n", __func__, port, portno); return 0; } #if defined(CONFIG_DEBUG_FS) static ssize_t debug_smd_read_stats(struct file *file, char 
__user *ubuf, size_t count, loff_t *ppos) { struct gsmd_port *port; struct smd_port_info *pi; char *buf; unsigned long flags; int temp = 0; int i; int ret; buf = kzalloc(sizeof(char) * 512, GFP_KERNEL); if (!buf) return -ENOMEM; for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; pi = port->pi; spin_lock_irqsave(&port->port_lock, flags); temp += scnprintf(buf + temp, 512 - temp, "###PORT:%d###\n" "nbytes_tolaptop: %lu\n" "nbytes_tomodem: %lu\n" "cbits_to_modem: %u\n" "cbits_to_laptop: %u\n" "n_read: %u\n" "smd_read_avail: %d\n" "smd_write_avail: %d\n" "CH_OPENED: %d\n" "CH_READY: %d\n", i, port->nbytes_tolaptop, port->nbytes_tomodem, port->cbits_to_modem, port->cbits_to_laptop, port->n_read, pi->ch ? smd_read_avail(pi->ch) : 0, pi->ch ? smd_write_avail(pi->ch) : 0, test_bit(CH_OPENED, &pi->flags), test_bit(CH_READY, &pi->flags)); spin_unlock_irqrestore(&port->port_lock, flags); } ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp); kfree(buf); return ret; } static ssize_t debug_smd_reset_stats(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct gsmd_port *port; unsigned long flags; int i; for (i = 0; i < n_smd_ports; i++) { port = smd_ports[i].port; spin_lock_irqsave(&port->port_lock, flags); port->nbytes_tolaptop = 0; port->nbytes_tomodem = 0; spin_unlock_irqrestore(&port->port_lock, flags); } return count; } static int debug_smd_open(struct inode *inode, struct file *file) { return 0; } static const struct file_operations debug_gsmd_ops = { .open = debug_smd_open, .read = debug_smd_read_stats, .write = debug_smd_reset_stats, }; static void gsmd_debugfs_init(void) { struct dentry *dent; dent = debugfs_create_dir("usb_gsmd", 0); if (IS_ERR(dent)) return; debugfs_create_file("status", 0444, dent, 0, &debug_gsmd_ops); } #else static void gsmd_debugfs_init(void) {} #endif int gsmd_setup(struct usb_gadget *g, unsigned count) { struct usb_cdc_line_coding coding; int ret; int i; pr_debug("%s: g:%p count: %d\n", __func__, g, 
count); if (!count || count > SMD_N_PORTS) { pr_err("%s: Invalid num of ports count:%d gadget:%p\n", __func__, count, g); return -EINVAL; } coding.dwDTERate = cpu_to_le32(9600); coding.bCharFormat = 8; coding.bParityType = USB_CDC_NO_PARITY; coding.bDataBits = USB_CDC_1_STOP_BITS; gsmd_wq = create_singlethread_workqueue("k_gsmd"); if (!gsmd_wq) { pr_err("%s: Unable to create workqueue gsmd_wq\n", __func__); return -ENOMEM; } for (i = 0; i < count; i++) { mutex_init(&smd_ports[i].lock); n_smd_ports++; ret = gsmd_port_alloc(i, &coding); if (ret) { n_smd_ports--; pr_err("%s: Unable to alloc port:%d\n", __func__, i); goto free_smd_ports; } } gsmd_debugfs_init(); return 0; free_smd_ports: for (i = 0; i < n_smd_ports; i++) gsmd_port_free(i); destroy_workqueue(gsmd_wq); return ret; } void gsmd_cleanup(struct usb_gadget *g, unsigned count) { /* TBD */ }
gpl-2.0
YUPlayGodDev/android_kernel_cyanogen_msm8994
security/keys/permission.c
2820
2933
/* Key permission checking * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/security.h> #include "internal.h" /** * key_task_permission - Check a key can be used * @key_ref: The key to check. * @cred: The credentials to use. * @perm: The permissions to check for. * * Check to see whether permission is granted to use a key in the desired way, * but permit the security modules to override. * * The caller must hold either a ref on cred or must hold the RCU readlock. * * Returns 0 if successful, -EACCES if access is denied based on the * permissions bits or the LSM check. */ int key_task_permission(const key_ref_t key_ref, const struct cred *cred, key_perm_t perm) { struct key *key; key_perm_t kperm; int ret; key = key_ref_to_ptr(key_ref); /* use the second 8-bits of permissions for keys the caller owns */ if (uid_eq(key->uid, cred->fsuid)) { kperm = key->perm >> 16; goto use_these_perms; } /* use the third 8-bits of permissions for keys the caller has a group * membership in common with */ if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) { if (gid_eq(key->gid, cred->fsgid)) { kperm = key->perm >> 8; goto use_these_perms; } ret = groups_search(cred->group_info, key->gid); if (ret) { kperm = key->perm >> 8; goto use_these_perms; } } /* otherwise use the least-significant 8-bits */ kperm = key->perm; use_these_perms: /* use the top 8-bits of permissions for keys the caller possesses * - possessor permissions are additive with other permissions */ if (is_key_possessed(key_ref)) kperm |= key->perm >> 24; kperm = kperm & perm & KEY_ALL; if (kperm != perm) return -EACCES; /* let LSM be the final arbiter */ return 
security_key_permission(key_ref, cred, perm); } EXPORT_SYMBOL(key_task_permission); /** * key_validate - Validate a key. * @key: The key to be validated. * * Check that a key is valid, returning 0 if the key is okay, -ENOKEY if the * key is invalidated, -EKEYREVOKED if the key's type has been removed or if * the key has been revoked or -EKEYEXPIRED if the key has expired. */ int key_validate(const struct key *key) { unsigned long flags = key->flags; if (flags & (1 << KEY_FLAG_INVALIDATED)) return -ENOKEY; /* check it's still accessible */ if (flags & ((1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_DEAD))) return -EKEYREVOKED; /* check it hasn't expired */ if (key->expiry) { struct timespec now = current_kernel_time(); if (now.tv_sec >= key->expiry) return -EKEYEXPIRED; } return 0; } EXPORT_SYMBOL(key_validate);
gpl-2.0
Snuzzo/PLUS_kernel
net/rfkill/rfkill-gpio.c
2820
6082
/* * Copyright (c) 2011, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/rfkill.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/rfkill-gpio.h> enum rfkill_gpio_clk_state { UNSPECIFIED = 0, PWR_ENABLED, PWR_DISABLED }; #define PWR_CLK_SET(_RF, _EN) \ ((_RF)->pwr_clk_enabled = (!(_EN) ? 
PWR_ENABLED : PWR_DISABLED)) #define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED) #define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED) struct rfkill_gpio_data { struct rfkill_gpio_platform_data *pdata; struct rfkill *rfkill_dev; char *reset_name; char *shutdown_name; enum rfkill_gpio_clk_state pwr_clk_enabled; struct clk *pwr_clk; }; static int rfkill_gpio_set_power(void *data, bool blocked) { struct rfkill_gpio_data *rfkill = data; if (blocked) { if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) gpio_direction_output(rfkill->pdata->shutdown_gpio, 0); if (gpio_is_valid(rfkill->pdata->reset_gpio)) gpio_direction_output(rfkill->pdata->reset_gpio, 0); if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill)) clk_disable(rfkill->pwr_clk); } else { if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill)) clk_enable(rfkill->pwr_clk); if (gpio_is_valid(rfkill->pdata->reset_gpio)) gpio_direction_output(rfkill->pdata->reset_gpio, 1); if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) gpio_direction_output(rfkill->pdata->shutdown_gpio, 1); } if (rfkill->pwr_clk) PWR_CLK_SET(rfkill, blocked); return 0; } static const struct rfkill_ops rfkill_gpio_ops = { .set_block = rfkill_gpio_set_power, }; static int rfkill_gpio_probe(struct platform_device *pdev) { struct rfkill_gpio_data *rfkill; struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data; int ret = 0; int len = 0; if (!pdata) { pr_warn("%s: No platform data specified\n", __func__); return -EINVAL; } /* make sure at-least one of the GPIO is defined and that * a name is specified for this instance */ if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) && !gpio_is_valid(pdata->shutdown_gpio))) { pr_warn("%s: invalid platform data\n", __func__); return -EINVAL; } rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); if (!rfkill) return -ENOMEM; rfkill->pdata = pdata; len = strlen(pdata->name); rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL); if (!rfkill->reset_name) { ret = -ENOMEM; goto fail_alloc; } 
rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL); if (!rfkill->shutdown_name) { ret = -ENOMEM; goto fail_reset_name; } snprintf(rfkill->reset_name, len + 6 , "%s_reset", pdata->name); snprintf(rfkill->shutdown_name, len + 9, "%s_shutdown", pdata->name); if (pdata->power_clk_name) { rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name); if (IS_ERR(rfkill->pwr_clk)) { pr_warn("%s: can't find pwr_clk.\n", __func__); goto fail_shutdown_name; } } if (gpio_is_valid(pdata->reset_gpio)) { ret = gpio_request(pdata->reset_gpio, rfkill->reset_name); if (ret) { pr_warn("%s: failed to get reset gpio.\n", __func__); goto fail_clock; } } if (gpio_is_valid(pdata->shutdown_gpio)) { ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name); if (ret) { pr_warn("%s: failed to get shutdown gpio.\n", __func__); goto fail_reset; } } rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type, &rfkill_gpio_ops, rfkill); if (!rfkill->rfkill_dev) goto fail_shutdown; ret = rfkill_register(rfkill->rfkill_dev); if (ret < 0) goto fail_rfkill; platform_set_drvdata(pdev, rfkill); dev_info(&pdev->dev, "%s device registered.\n", pdata->name); return 0; fail_rfkill: rfkill_destroy(rfkill->rfkill_dev); fail_shutdown: if (gpio_is_valid(pdata->shutdown_gpio)) gpio_free(pdata->shutdown_gpio); fail_reset: if (gpio_is_valid(pdata->reset_gpio)) gpio_free(pdata->reset_gpio); fail_clock: if (rfkill->pwr_clk) clk_put(rfkill->pwr_clk); fail_shutdown_name: kfree(rfkill->shutdown_name); fail_reset_name: kfree(rfkill->reset_name); fail_alloc: kfree(rfkill); return ret; } static int rfkill_gpio_remove(struct platform_device *pdev) { struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev); rfkill_unregister(rfkill->rfkill_dev); rfkill_destroy(rfkill->rfkill_dev); if (gpio_is_valid(rfkill->pdata->shutdown_gpio)) gpio_free(rfkill->pdata->shutdown_gpio); if (gpio_is_valid(rfkill->pdata->reset_gpio)) gpio_free(rfkill->pdata->reset_gpio); if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill)) 
clk_disable(rfkill->pwr_clk); if (rfkill->pwr_clk) clk_put(rfkill->pwr_clk); kfree(rfkill->shutdown_name); kfree(rfkill->reset_name); kfree(rfkill); return 0; } static struct platform_driver rfkill_gpio_driver = { .probe = rfkill_gpio_probe, .remove = __devexit_p(rfkill_gpio_remove), .driver = { .name = "rfkill_gpio", .owner = THIS_MODULE, }, }; static int __init rfkill_gpio_init(void) { return platform_driver_register(&rfkill_gpio_driver); } static void __exit rfkill_gpio_exit(void) { platform_driver_unregister(&rfkill_gpio_driver); } module_init(rfkill_gpio_init); module_exit(rfkill_gpio_exit); MODULE_DESCRIPTION("gpio rfkill"); MODULE_AUTHOR("NVIDIA"); MODULE_LICENSE("GPL");
gpl-2.0
superr/android_kernel_lge_w5c
arch/arm/mach-msm/msm_mem_hole.c
3332
1102
/* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /** * This module exists for the express purpose of removing memory * via the msm memory-remove mechanism (see * Documentation/devicetree/bindings/arm/msm/memory-reserve.txt). Compiling * this module into a kernel is essentially the means by which any * nodes in the device tree with compatible = * "qcom,msm-mem-hole" will be "activated", thus providing a * convenient mechanism for enabling/disabling memory removal * (qcom,memory-*). */ #include <linux/module.h> #define MSM_MEM_HOLE_COMPAT_STR "qcom,msm-mem-hole" EXPORT_COMPAT(MSM_MEM_HOLE_COMPAT_STR);
gpl-2.0
wolverine2k/android_kernel_oppo_n1
drivers/cpuidle/driver.c
4356
2214
/*
 * driver.c - driver support
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/cpuidle.h>

#include "cpuidle.h"

/* The single registered cpuidle driver; NULL while none is registered. */
static struct cpuidle_driver *cpuidle_curr_driver;
/* Serialises registration and unregistration of cpuidle_curr_driver. */
DEFINE_SPINLOCK(cpuidle_driver_lock);

/*
 * Fill in default power_usage numbers for drivers that did not supply
 * their own.  Must be called before the driver is published.
 */
static void __cpuidle_register_driver(struct cpuidle_driver *drv)
{
	int i;
	/*
	 * cpuidle driver should set the drv->power_specified bit
	 * before registering if the driver provides
	 * power_usage numbers.
	 *
	 * If power_specified is not set,
	 * we fill in power_usage with decreasing values as the
	 * cpuidle code has an implicit assumption that state Cn
	 * uses less power than C(n-1).
	 *
	 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
	 * an power value of -1. So we use -2, -3, etc, for other
	 * c-states.
	 */
	if (!drv->power_specified) {
		for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
			drv->states[i].power_usage = -1 - i;
	}
}

/**
 * cpuidle_register_driver - registers a driver
 * @drv: the driver
 *
 * Returns 0 on success, -EINVAL for a bad driver, -ENODEV when cpuidle
 * is disabled, or -EBUSY if another driver is already registered (only
 * one driver may be registered at a time).
 */
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
	if (!drv || !drv->state_count)
		return -EINVAL;

	if (cpuidle_disabled())
		return -ENODEV;

	spin_lock(&cpuidle_driver_lock);
	if (cpuidle_curr_driver) {
		spin_unlock(&cpuidle_driver_lock);
		return -EBUSY;
	}
	/* defaults must be in place before the driver becomes visible */
	__cpuidle_register_driver(drv);
	cpuidle_curr_driver = drv;
	spin_unlock(&cpuidle_driver_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_driver);

/**
 * cpuidle_get_driver - return the current driver
 *
 * NOTE(review): reads cpuidle_curr_driver without taking
 * cpuidle_driver_lock; callers appear to rely on the pointer being
 * stable for the lifetime of their use — confirm against callers.
 */
struct cpuidle_driver *cpuidle_get_driver(void)
{
	return cpuidle_curr_driver;
}

EXPORT_SYMBOL_GPL(cpuidle_get_driver);

/**
 * cpuidle_unregister_driver - unregisters a driver
 * @drv: the driver
 *
 * Warns and does nothing if @drv is not the currently registered driver.
 */
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
	/* sanity check is done outside the lock, matching the register path */
	if (drv != cpuidle_curr_driver) {
		WARN(1, "invalid cpuidle_unregister_driver(%s)\n",
		     drv->name);
		return;
	}

	spin_lock(&cpuidle_driver_lock);
	cpuidle_curr_driver = NULL;
	spin_unlock(&cpuidle_driver_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
gpl-2.0
tbalden/android_kernel_htc_m7-sense4.3
drivers/staging/iio/accel/adis16209_trigger.c
5124
1884
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/export.h>

#include "../iio.h"
#include "../trigger.h"
#include "adis16209.h"

/**
 * adis16209_data_rdy_trig_poll() the event handler for the data rdy trig
 **/
static irqreturn_t adis16209_data_rdy_trig_poll(int irq, void *trig)
{
	iio_trigger_poll(trig, iio_get_time_ns());
	return IRQ_HANDLED;
}

/**
 * adis16209_data_rdy_trigger_set_state() set datardy interrupt state
 **/
static int adis16209_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = trig->private_data;

	dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
	return adis16209_set_irq(indio_dev, state);
}

static const struct iio_trigger_ops adis16209_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &adis16209_data_rdy_trigger_set_state,
};

/*
 * Allocate and register the data-ready trigger for the device.
 * Returns 0 on success or a negative errno; all resources acquired
 * along the way are released on failure.
 */
int adis16209_probe_trigger(struct iio_dev *indio_dev)
{
	int ret;
	struct adis16209_state *st = iio_priv(indio_dev);

	st->trig = iio_allocate_trigger("adis16209-dev%d", indio_dev->id);
	if (st->trig == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	ret = request_irq(st->us->irq,
			  adis16209_data_rdy_trig_poll,
			  IRQF_TRIGGER_RISING,
			  "adis16209",
			  st->trig);
	if (ret)
		goto error_free_trig;

	st->trig->dev.parent = &st->us->dev;
	st->trig->ops = &adis16209_trigger_ops;
	st->trig->private_data = indio_dev;
	ret = iio_trigger_register(st->trig);
	if (ret)
		goto error_free_irq;

	/*
	 * Select the default trigger only after registration succeeded;
	 * previously the assignment came before the error check, which
	 * left indio_dev->trig pointing at a freed trigger on failure.
	 */
	indio_dev->trig = st->trig;

	return 0;

error_free_irq:
	free_irq(st->us->irq, st->trig);
error_free_trig:
	iio_free_trigger(st->trig);
error_ret:
	return ret;
}

/* Tear down everything set up by adis16209_probe_trigger(). */
void adis16209_remove_trigger(struct iio_dev *indio_dev)
{
	struct adis16209_state *st = iio_priv(indio_dev);

	iio_trigger_unregister(st->trig);
	free_irq(st->us->irq, st->trig);
	iio_free_trigger(st->trig);
}
gpl-2.0
johnnyslt/kernel_zte_v967s
drivers/staging/iio/accel/adis16203_trigger.c
5124
1669
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/export.h>

#include "../iio.h"
#include "../trigger.h"
#include "adis16203.h"

/**
 * adis16203_data_rdy_trigger_set_state() set datardy interrupt state
 **/
static int adis16203_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = trig->private_data;

	dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
	return adis16203_set_irq(indio_dev, state);
}

static const struct iio_trigger_ops adis16203_trigger_ops = {
	.owner = THIS_MODULE,
	.set_trigger_state = &adis16203_data_rdy_trigger_set_state,
};

/*
 * Allocate and register the data-ready trigger for the device.
 * Returns 0 on success or a negative errno; all resources acquired
 * along the way are released on failure.
 */
int adis16203_probe_trigger(struct iio_dev *indio_dev)
{
	int ret;
	struct adis16203_state *st = iio_priv(indio_dev);

	st->trig = iio_allocate_trigger("adis16203-dev%d", indio_dev->id);
	if (st->trig == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	ret = request_irq(st->us->irq,
			  &iio_trigger_generic_data_rdy_poll,
			  IRQF_TRIGGER_RISING,
			  "adis16203",
			  st->trig);
	if (ret)
		goto error_free_trig;

	st->trig->dev.parent = &st->us->dev;
	st->trig->ops = &adis16203_trigger_ops;
	st->trig->private_data = indio_dev;
	ret = iio_trigger_register(st->trig);
	if (ret)
		goto error_free_irq;

	/*
	 * Select the default trigger only after registration succeeded;
	 * previously the assignment came before the error check, which
	 * left indio_dev->trig pointing at a freed trigger on failure.
	 */
	indio_dev->trig = st->trig;

	return 0;

error_free_irq:
	free_irq(st->us->irq, st->trig);
error_free_trig:
	iio_free_trigger(st->trig);
error_ret:
	return ret;
}

/* Tear down everything set up by adis16203_probe_trigger(). */
void adis16203_remove_trigger(struct iio_dev *indio_dev)
{
	struct adis16203_state *st = iio_priv(indio_dev);

	iio_trigger_unregister(st->trig);
	free_irq(st->us->irq, st->trig);
	iio_free_trigger(st->trig);
}
gpl-2.0
VegaDevTeam/android_kernel_pantech_ef52s
arch/arm/mach-ixp4xx/coyote-pci.c
5380
1613
/* * arch/arm/mach-ixp4xx/coyote-pci.c * * PCI setup routines for ADI Engineering Coyote platform * * Copyright (C) 2002 Jungo Software Technologies. * Copyright (C) 2003 MontaVista Softwrae, Inc. * * Maintainer: Deepak Saxena <dsaxena@mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/pci.h> #define SLOT0_DEVID 14 #define SLOT1_DEVID 15 /* PCI controller GPIO to IRQ pin mappings */ #define SLOT0_INTA 6 #define SLOT1_INTA 11 void __init coyote_pci_preinit(void) { irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT0_INTA), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(SLOT1_INTA), IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static int __init coyote_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (slot == SLOT0_DEVID) return IXP4XX_GPIO_IRQ(SLOT0_INTA); else if (slot == SLOT1_DEVID) return IXP4XX_GPIO_IRQ(SLOT1_INTA); else return -1; } struct hw_pci coyote_pci __initdata = { .nr_controllers = 1, .preinit = coyote_pci_preinit, .swizzle = pci_std_swizzle, .setup = ixp4xx_setup, .scan = ixp4xx_scan_bus, .map_irq = coyote_map_irq, }; int __init coyote_pci_init(void) { if (machine_is_adi_coyote()) pci_common_init(&coyote_pci); return 0; } subsys_initcall(coyote_pci_init);
gpl-2.0
thestealth131205/k2_u-ul
drivers/ide/ide_platform.c
5636
3627
/* * Platform IDE driver * * Copyright (C) 2007 MontaVista Software * * Maintainer: Kumar Gala <galak@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/ide.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/ata_platform.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/io.h> static void __devinit plat_ide_setup_ports(struct ide_hw *hw, void __iomem *base, void __iomem *ctrl, struct pata_platform_info *pdata, int irq) { unsigned long port = (unsigned long)base; int i; hw->io_ports.data_addr = port; port += (1 << pdata->ioport_shift); for (i = 1; i <= 7; i++, port += (1 << pdata->ioport_shift)) hw->io_ports_array[i] = port; hw->io_ports.ctl_addr = (unsigned long)ctrl; hw->irq = irq; } static const struct ide_port_info platform_ide_port_info = { .host_flags = IDE_HFLAG_NO_DMA, .chipset = ide_generic, }; static int __devinit plat_ide_probe(struct platform_device *pdev) { struct resource *res_base, *res_alt, *res_irq; void __iomem *base, *alt_base; struct pata_platform_info *pdata; struct ide_host *host; int ret = 0, mmio = 0; struct ide_hw hw, *hws[] = { &hw }; struct ide_port_info d = platform_ide_port_info; pdata = pdev->dev.platform_data; /* get a pointer to the register memory */ res_base = platform_get_resource(pdev, IORESOURCE_IO, 0); res_alt = platform_get_resource(pdev, IORESOURCE_IO, 1); if (!res_base || !res_alt) { res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); res_alt = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res_base || !res_alt) { ret = -ENOMEM; goto out; } mmio = 1; } res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_irq) { ret = -EINVAL; goto out; } if 
(mmio) { base = devm_ioremap(&pdev->dev, res_base->start, resource_size(res_base)); alt_base = devm_ioremap(&pdev->dev, res_alt->start, resource_size(res_alt)); } else { base = devm_ioport_map(&pdev->dev, res_base->start, resource_size(res_base)); alt_base = devm_ioport_map(&pdev->dev, res_alt->start, resource_size(res_alt)); } memset(&hw, 0, sizeof(hw)); plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); hw.dev = &pdev->dev; d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE) d.irq_flags |= IRQF_SHARED; if (mmio) d.host_flags |= IDE_HFLAG_MMIO; ret = ide_host_add(&d, hws, 1, &host); if (ret) goto out; platform_set_drvdata(pdev, host); return 0; out: return ret; } static int __devexit plat_ide_remove(struct platform_device *pdev) { struct ide_host *host = dev_get_drvdata(&pdev->dev); ide_host_remove(host); return 0; } static struct platform_driver platform_ide_driver = { .driver = { .name = "pata_platform", .owner = THIS_MODULE, }, .probe = plat_ide_probe, .remove = __devexit_p(plat_ide_remove), }; static int __init platform_ide_init(void) { return platform_driver_register(&platform_ide_driver); } static void __exit platform_ide_exit(void) { platform_driver_unregister(&platform_ide_driver); } MODULE_DESCRIPTION("Platform IDE driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pata_platform"); module_init(platform_ide_init); module_exit(platform_ide_exit);
gpl-2.0
n-soda/linux
net/ipv4/raw.c
5
16474
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * RAW - implementation of IP "raw" sockets. * * Version: $Id: raw.c,v 1.63.2.1 2002/03/05 12:47:34 davem Exp $ * * Authors: Ross Biro, <bir7@leland.Stanford.Edu> * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * * Fixes: * Alan Cox : verify_area() fixed up * Alan Cox : ICMP error handling * Alan Cox : EMSGSIZE if you send too big a packet * Alan Cox : Now uses generic datagrams and shared * skbuff library. No more peek crashes, * no more backlogs * Alan Cox : Checks sk->broadcast. * Alan Cox : Uses skb_free_datagram/skb_copy_datagram * Alan Cox : Raw passes ip options too * Alan Cox : Setsocketopt added * Alan Cox : Fixed error return for broadcasts * Alan Cox : Removed wake_up calls * Alan Cox : Use ttl/tos * Alan Cox : Cleaned up old debugging * Alan Cox : Use new kernel side addresses * Arnt Gulbrandsen : Fixed MSG_DONTROUTE in raw sockets. * Alan Cox : BSD style RAW socket demultiplexing. * Alan Cox : Beginnings of mrouted support. * Alan Cox : Added IP_HDRINCL option. * Alan Cox : Skip broadcast check if BSDism set. * David S. Miller : New socket lookup architecture. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #include <linux/config.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/mroute.h> #include <net/ip.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/icmp.h> #include <net/udp.h> #include <net/raw.h> #include <net/inet_common.h> #include <net/checksum.h> struct sock *raw_v4_htable[RAWV4_HTABLE_SIZE]; rwlock_t raw_v4_lock = RW_LOCK_UNLOCKED; static void raw_v4_hash(struct sock *sk) { struct sock **skp = &raw_v4_htable[sk->num & (RAWV4_HTABLE_SIZE - 1)]; write_lock_bh(&raw_v4_lock); if ((sk->next = *skp) != NULL) (*skp)->pprev = &sk->next; *skp = sk; sk->pprev = skp; sock_prot_inc_use(sk->prot); sock_hold(sk); write_unlock_bh(&raw_v4_lock); } static void raw_v4_unhash(struct sock *sk) { write_lock_bh(&raw_v4_lock); if (sk->pprev) { if (sk->next) sk->next->pprev = sk->pprev; *sk->pprev = sk->next; sk->pprev = NULL; sock_prot_dec_use(sk->prot); __sock_put(sk); } write_unlock_bh(&raw_v4_lock); } struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num, unsigned long raddr, unsigned long laddr, int dif) { struct sock *s = sk; for (s = sk; s; s = s->next) { if (s->num == num && !(s->daddr && s->daddr != raddr) && !(s->rcv_saddr && s->rcv_saddr != laddr) && !(s->bound_dev_if && s->bound_dev_if != dif)) break; /* gotcha */ } return s; } /* * 0 - deliver * 1 - block */ static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb) { int type; type = skb->h.icmph->type; if (type < 32) { __u32 data = sk->tp_pinfo.tp_raw4.filter.data; return ((1 << type) & data) != 0; } /* Do not block unknown ICMP types */ return 0; } /* IP input processing comes here for RAW socket delivery. 
* This is fun as to avoid copies we want to make no surplus * copies. * * RFC 1122: SHOULD pass TOS value up to the transport layer. * -> It does. And not only TOS, but all IP header. */ struct sock *raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash) { struct sock *sk; read_lock(&raw_v4_lock); if ((sk = raw_v4_htable[hash]) == NULL) goto out; sk = __raw_v4_lookup(sk, iph->protocol, iph->saddr, iph->daddr, skb->dev->ifindex); while (sk) { struct sock *sknext = __raw_v4_lookup(sk->next, iph->protocol, iph->saddr, iph->daddr, skb->dev->ifindex); if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) { struct sk_buff *clone; if (!sknext) break; clone = skb_clone(skb, GFP_ATOMIC); /* Not releasing hash table! */ if (clone) raw_rcv(sk, clone); } sk = sknext; } out: if (sk) sock_hold(sk); read_unlock(&raw_v4_lock); return sk; } void raw_err (struct sock *sk, struct sk_buff *skb, u32 info) { int type = skb->h.icmph->type; int code = skb->h.icmph->code; int err = 0; int harderr = 0; /* Report error on raw socket, if: 1. User requested ip_recverr. 2. Socket is connected (otherwise the error indication is useless without ip_recverr and error is hard. 
*/ if (!sk->protinfo.af_inet.recverr && sk->state != TCP_ESTABLISHED) return; switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: return; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: err = EHOSTUNREACH; if (code > NR_ICMP_UNREACH) break; err = icmp_err_convert[code].errno; harderr = icmp_err_convert[code].fatal; if (code == ICMP_FRAG_NEEDED) { harderr = sk->protinfo.af_inet.pmtudisc != IP_PMTUDISC_DONT; err = EMSGSIZE; } } if (sk->protinfo.af_inet.recverr) { struct iphdr *iph = (struct iphdr*)skb->data; u8 *payload = skb->data + (iph->ihl << 2); if (sk->protinfo.af_inet.hdrincl) payload = skb->data; ip_icmp_error(sk, skb, err, 0, info, payload); } if (sk->protinfo.af_inet.recverr || harderr) { sk->err = err; sk->error_report(sk); } } static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) { /* Charge it to the socket. */ if (sock_queue_rcv_skb(sk, skb) < 0) { IP_INC_STATS(IpInDiscards); kfree_skb(skb); return NET_RX_DROP; } IP_INC_STATS(IpInDelivers); return NET_RX_SUCCESS; } int raw_rcv(struct sock *sk, struct sk_buff *skb) { skb_push(skb, skb->data - skb->nh.raw); raw_rcv_skb(sk, skb); return 0; } struct rawfakehdr { struct iovec *iov; u32 saddr; struct dst_entry *dst; }; /* * Send a RAW IP packet. */ /* * Callback support is trivial for SOCK_RAW */ static int raw_getfrag(const void *p, char *to, unsigned int offset, unsigned int fraglen, struct sk_buff *skb) { struct rawfakehdr *rfh = (struct rawfakehdr *) p; return memcpy_fromiovecend(to, rfh->iov, offset, fraglen); } /* * IPPROTO_RAW needs extra work. 
*/ static int raw_getrawfrag(const void *p, char *to, unsigned int offset, unsigned int fraglen, struct sk_buff *skb) { struct rawfakehdr *rfh = (struct rawfakehdr *) p; if (memcpy_fromiovecend(to, rfh->iov, offset, fraglen)) return -EFAULT; if (!offset) { struct iphdr *iph = (struct iphdr *)to; if (!iph->saddr) iph->saddr = rfh->saddr; iph->check = 0; iph->tot_len = htons(fraglen); /* This is right as you can't frag RAW packets */ /* * Deliberate breach of modularity to keep * ip_build_xmit clean (well less messy). */ if (!iph->id) ip_select_ident(iph, rfh->dst, NULL); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } return 0; } static int raw_sendmsg(struct sock *sk, struct msghdr *msg, int len) { struct ipcm_cookie ipc; struct rawfakehdr rfh; struct rtable *rt = NULL; int free = 0; u32 daddr; u8 tos; int err; /* This check is ONLY to check for arithmetic overflow on integer(!) len. Not more! Real check will be made in ip_build_xmit --ANK BTW socket.c -> af_*.c -> ... make multiple invalid conversions size_t -> int. We MUST repair it f.e. by replacing all of them with size_t and revise all the places sort of len += sizeof(struct iphdr) If len was ULONG_MAX-10 it would be cathastrophe --ANK */ err = -EMSGSIZE; if (len < 0 || len > 0xFFFF) goto out; /* * Check the flags. */ err = -EOPNOTSUPP; if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message */ goto out; /* compatibility */ /* * Get and verify the address. */ if (msg->msg_namelen) { struct sockaddr_in *usin = (struct sockaddr_in*)msg->msg_name; err = -EINVAL; if (msg->msg_namelen < sizeof(*usin)) goto out; if (usin->sin_family != AF_INET) { static int complained; if (!complained++) printk(KERN_INFO "%s forgot to set AF_INET in " "raw sendmsg. Fix it!\n", current->comm); err = -EINVAL; if (usin->sin_family) goto out; } daddr = usin->sin_addr.s_addr; /* ANK: I did not forget to get protocol from port field. * I just do not know, who uses this weirdness. * IP_HDRINCL is much more convenient. 
*/ } else { err = -EDESTADDRREQ; if (sk->state != TCP_ESTABLISHED) goto out; daddr = sk->daddr; } ipc.addr = sk->saddr; ipc.opt = NULL; ipc.oif = sk->bound_dev_if; if (msg->msg_controllen) { err = ip_cmsg_send(msg, &ipc); if (err) goto out; if (ipc.opt) free = 1; } rfh.saddr = ipc.addr; ipc.addr = daddr; if (!ipc.opt) ipc.opt = sk->protinfo.af_inet.opt; if (ipc.opt) { err = -EINVAL; /* Linux does not mangle headers on raw sockets, * so that IP options + IP_HDRINCL is non-sense. */ if (sk->protinfo.af_inet.hdrincl) goto done; if (ipc.opt->srr) { if (!daddr) goto done; daddr = ipc.opt->faddr; } } tos = RT_TOS(sk->protinfo.af_inet.tos) | sk->localroute; if (msg->msg_flags & MSG_DONTROUTE) tos |= RTO_ONLINK; if (MULTICAST(daddr)) { if (!ipc.oif) ipc.oif = sk->protinfo.af_inet.mc_index; if (!rfh.saddr) rfh.saddr = sk->protinfo.af_inet.mc_addr; } err = ip_route_output(&rt, daddr, rfh.saddr, tos, ipc.oif); if (err) goto done; err = -EACCES; if (rt->rt_flags & RTCF_BROADCAST && !sk->broadcast) goto done; if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; back_from_confirm: rfh.iov = msg->msg_iov; rfh.saddr = rt->rt_src; rfh.dst = &rt->u.dst; if (!ipc.addr) ipc.addr = rt->rt_dst; err = ip_build_xmit(sk, sk->protinfo.af_inet.hdrincl ? raw_getrawfrag : raw_getfrag, &rfh, len, &ipc, rt, msg->msg_flags); done: if (free) kfree(ipc.opt); ip_rt_put(rt); out: return err < 0 ? err : len; do_confirm: dst_confirm(&rt->u.dst); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto done; } static void raw_close(struct sock *sk, long timeout) { /* * Raw sockets may have direct kernel refereneces. Kill them. */ ip_ra_control(sk, 0, NULL); inet_sock_release(sk); } /* This gets rid of all the nasties in af_inet. 
-DaveM */ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; int ret = -EINVAL; int chk_addr_ret; if (sk->state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) goto out; chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr); ret = -EADDRNOTAVAIL; if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; sk->rcv_saddr = sk->saddr = addr->sin_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) sk->saddr = 0; /* Use device */ sk_dst_reset(sk); ret = 0; out: return ret; } /* * This should be easy, if there is something there * we return it, otherwise we block. */ int raw_recvmsg(struct sock *sk, struct msghdr *msg, int len, int noblock, int flags, int *addr_len) { int copied = 0; int err = -EOPNOTSUPP; struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (addr_len) *addr_len = sizeof(*sin); if (flags & MSG_ERRQUEUE) { err = ip_recv_error(sk, msg, len); goto out; } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_addr.s_addr = skb->nh.iph->saddr; memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (sk->protinfo.af_inet.cmsg_flags) ip_cmsg_recv(msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? 
: copied; } static int raw_init(struct sock *sk) { struct raw_opt *tp = &(sk->tp_pinfo.tp_raw4); if (sk->num == IPPROTO_ICMP) memset(&tp->filter, 0, sizeof(tp->filter)); return 0; } static int raw_seticmpfilter(struct sock *sk, char *optval, int optlen) { if (optlen > sizeof(struct icmp_filter)) optlen = sizeof(struct icmp_filter); if (copy_from_user(&sk->tp_pinfo.tp_raw4.filter, optval, optlen)) return -EFAULT; return 0; } static int raw_geticmpfilter(struct sock *sk, char *optval, int *optlen) { int len, ret = -EFAULT; if (get_user(len, optlen)) goto out; ret = -EINVAL; if (len < 0) goto out; if (len > sizeof(struct icmp_filter)) len = sizeof(struct icmp_filter); ret = -EFAULT; if (put_user(len, optlen) || copy_to_user(optval, &sk->tp_pinfo.tp_raw4.filter, len)) goto out; ret = 0; out: return ret; } static int raw_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen) { if (level != SOL_RAW) return ip_setsockopt(sk, level, optname, optval, optlen); if (optname == ICMP_FILTER) { if (sk->num != IPPROTO_ICMP) return -EOPNOTSUPP; else return raw_seticmpfilter(sk, optval, optlen); } return -ENOPROTOOPT; } static int raw_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen) { if (level != SOL_RAW) return ip_getsockopt(sk, level, optname, optval, optlen); if (optname == ICMP_FILTER) { if (sk->num != IPPROTO_ICMP) return -EOPNOTSUPP; else return raw_geticmpfilter(sk, optval, optlen); } return -ENOPROTOOPT; } static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: { int amount = atomic_read(&sk->wmem_alloc); return put_user(amount, (int *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_irq(&sk->receive_queue.lock); skb = skb_peek(&sk->receive_queue); if (skb != NULL) amount = skb->len; spin_unlock_irq(&sk->receive_queue.lock); return put_user(amount, (int *)arg); } default: #ifdef CONFIG_IP_MROUTE return ipmr_ioctl(sk, cmd, arg); #else return -ENOIOCTLCMD; 
#endif } } static void get_raw_sock(struct sock *sp, char *tmpbuf, int i) { unsigned int dest = sp->daddr, src = sp->rcv_saddr; __u16 destp = 0, srcp = sp->num; sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", i, src, srcp, dest, destp, sp->state, atomic_read(&sp->wmem_alloc), atomic_read(&sp->rmem_alloc), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->refcnt), sp); } int raw_get_info(char *buffer, char **start, off_t offset, int length) { int len = 0, num = 0, i; off_t pos = 128; off_t begin; char tmpbuf[129]; if (offset < 128) len += sprintf(buffer, "%-127s\n", " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode"); read_lock(&raw_v4_lock); for (i = 0; i < RAWV4_HTABLE_SIZE; i++) { struct sock *sk; for (sk = raw_v4_htable[i]; sk; sk = sk->next, num++) { if (sk->family != PF_INET) continue; pos += 128; if (pos <= offset) continue; get_raw_sock(sk, tmpbuf, i); len += sprintf(buffer + len, "%-127s\n", tmpbuf); if (len >= length) goto out; } } out: read_unlock(&raw_v4_lock); begin = len - (pos - offset); *start = buffer + begin; len -= begin; if (len > length) len = length; if (len < 0) len = 0; return len; } struct proto raw_prot = { name: "RAW", close: raw_close, connect: udp_connect, disconnect: udp_disconnect, ioctl: raw_ioctl, init: raw_init, setsockopt: raw_setsockopt, getsockopt: raw_getsockopt, sendmsg: raw_sendmsg, recvmsg: raw_recvmsg, bind: raw_bind, backlog_rcv: raw_rcv_skb, hash: raw_v4_hash, unhash: raw_v4_unhash, };
gpl-2.0
walac/kde-workspace
plasma/netbook/shell/scripting/panel.cpp
5
4084
/*
 *   Copyright 2009 Aaron Seigo <aseigo@kde.org>
 *   Copyright 2010 Marco Martin <notmart@gmail.com>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Library General Public License as
 *   published by the Free Software Foundation; either version 2, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details
 *
 *   You should have received a copy of the GNU Library General Public
 *   License along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include "panel.h"

#include <Plasma/Corona>
#include <Plasma/Containment>

#include "netview.h"
#include "plasmaapp.h"

#include <plasmagenericshell/scripting/scriptengine.h>
#include <plasmagenericshell/scripting/widget.h>

namespace WorkspaceScripting
{

NetPanel::NetPanel(Plasma::Containment *containment, QObject *parent)
    : Containment(containment, parent)
{
}

NetPanel::~NetPanel()
{
}

// Map the containment's Plasma::Location to its scripting string name.
// A missing containment is reported as "floating".
QString NetPanel::location() const
{
    Plasma::Containment *cont = containment();
    if (!cont) {
        return "floating";
    }

    switch (cont->location()) {
    case Plasma::Desktop:
        return "desktop";
    case Plasma::FullScreen:
        return "fullscreen";
    case Plasma::TopEdge:
        return "top";
    case Plasma::BottomEdge:
        return "bottom";
    case Plasma::LeftEdge:
        return "left";
    case Plasma::RightEdge:
        return "right";
    case Plasma::Floating:
    default:
        return "floating";
    }
}

// Set the containment's location from its scripting string name
// (case-insensitive); unrecognized names fall back to floating.
void NetPanel::setLocation(const QString &locationString)
{
    Plasma::Containment *cont = containment();
    if (!cont) {
        return;
    }

    const QString key = locationString.toLower();
    Plasma::Location newLoc = Plasma::Floating;

    if (key == "desktop") {
        newLoc = Plasma::Desktop;
    } else if (key == "fullscreen") {
        newLoc = Plasma::FullScreen;
    } else if (key == "top") {
        newLoc = Plasma::TopEdge;
    } else if (key == "bottom") {
        newLoc = Plasma::BottomEdge;
    } else if (key == "left") {
        newLoc = Plasma::LeftEdge;
    } else if (key == "right") {
        newLoc = Plasma::RightEdge;
    }

    cont->setLocation(newLoc);
}

// The view hosting this panel (the application control bar),
// or 0 when there is no containment.
NetView *NetPanel::panel() const
{
    if (!containment()) {
        return 0;
    }

    return PlasmaApp::self()->controlBar();
}

// Thickness of the panel: width for vertical panels, height otherwise.
int NetPanel::height() const
{
    Plasma::Containment *cont = containment();
    if (!cont) {
        return 0;
    }

    if (cont->formFactor() == Plasma::Vertical) {
        return cont->size().width();
    }
    return cont->size().height();
}

// Resize the panel thickness, clamped to [16, screen-extent/3];
// requests below 16 pixels are ignored.
void NetPanel::setHeight(int height)
{
    Plasma::Containment *cont = containment();
    if (height < 16 || !cont) {
        return;
    }

    NetView *view = panel();
    if (!view) {
        return;
    }

    const QRect screenRect = cont->corona()->screenGeometry(view->screen());
    QSizeF newSize = cont->size();
    // Never let the panel occupy more than a third of the screen.
    const int maxThickness = (cont->formFactor() == Plasma::Vertical ?
                              screenRect.width() : screenRect.height()) / 3;
    height = qBound(16, height, maxThickness);

    if (cont->formFactor() == Plasma::Vertical) {
        newSize.setWidth(height);
    } else {
        newSize.setHeight(height);
    }

    cont->resize(newSize);
    cont->setMinimumSize(newSize);
    cont->setMaximumSize(newSize);
}

// Whether the hosting view auto-hides; false when there is no view.
bool NetPanel::autoHide() const
{
    NetView *view = panel();
    return view ? view->autoHide() : false;
}

// Toggle auto-hide on the hosting view; no-op if unchanged or viewless.
void NetPanel::setAutoHide(const bool autoHide)
{
    NetView *view = panel();
    if (view && autoHide != view->autoHide()) {
        view->setAutoHide(autoHide);
    }
}

}

#include "panel.moc"
gpl-2.0
TheTypoMaster/ubuntu-utopic
sound/pci/emu10k1/emu10k1.c
5
9021
/*
 *  The driver for the EMU10K1 (SB Live!) based soundcards
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 *  Copyright (c) by James Courtier-Dutton <James@superbug.demon.co.uk>
 *      Added support for Audigy 2 Value.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/emu10k1.h>
#include <sound/initval.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("EMU10K1");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB Live!/PCI512/E-mu APS},"
			"{Creative Labs,SB Audigy}}");

#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
#define ENABLE_SYNTH
#include <sound/emu10k1_synth.h>
#endif

static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;	/* Enable this card */
static int extin[SNDRV_CARDS];
static int extout[SNDRV_CARDS];
static int seq_ports[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 4};
static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64};
static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128};
static bool enable_ir[SNDRV_CARDS];
static uint subsystem[SNDRV_CARDS];	/* Force card subsystem model */
static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for the EMU10K1 soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable the EMU10K1 soundcard.");
module_param_array(extin, int, NULL, 0444);
MODULE_PARM_DESC(extin, "Available external inputs for FX8010. Zero=default.");
module_param_array(extout, int, NULL, 0444);
MODULE_PARM_DESC(extout, "Available external outputs for FX8010. Zero=default.");
module_param_array(seq_ports, int, NULL, 0444);
MODULE_PARM_DESC(seq_ports, "Allocated sequencer ports for internal synthesizer.");
module_param_array(max_synth_voices, int, NULL, 0444);
MODULE_PARM_DESC(max_synth_voices, "Maximum number of voices for WaveTable.");
module_param_array(max_buffer_size, int, NULL, 0444);
MODULE_PARM_DESC(max_buffer_size, "Maximum sample buffer size in MB.");
module_param_array(enable_ir, bool, NULL, 0444);
MODULE_PARM_DESC(enable_ir, "Enable IR.");
module_param_array(subsystem, uint, NULL, 0444);
MODULE_PARM_DESC(subsystem, "Force card subsystem model.");
module_param_array(delay_pcm_irq, uint, NULL, 0444);
/* FIX: description said "(default 0)" but the initializer above is 2. */
MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 2).");

/*
 * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value  Model:SB0400
 */
static DEFINE_PCI_DEVICE_TABLE(snd_emu10k1_ids) = {
	{ PCI_VDEVICE(CREATIVE, 0x0002), 0 },	/* EMU10K1 */
	{ PCI_VDEVICE(CREATIVE, 0x0004), 1 },	/* Audigy */
	{ PCI_VDEVICE(CREATIVE, 0x0008), 1 },	/* Audigy 2 Value SB0400 */
	{ 0, }
};

/*
 * Audigy 2 Value notes:
 *  A_IOCFG Input (GPIO)
 *	0x400  = Front analog jack plugged in. (Green socket)
 *	0x1000 = Read analog jack plugged in. (Black socket)
 *	0x2000 = Center/LFE analog jack plugged in. (Orange socket)
 *  A_IOCFG Output (GPIO)
 *	0x60 = Sound out of front Left.
 *  Win sets it to 0xXX61
 */
MODULE_DEVICE_TABLE(pci, snd_emu10k1_ids);

/*
 * Probe one EMU10K1/Audigy PCI device: create the card, register all
 * PCM/mixer/MIDI/FX8010 components (and the wavetable synth when the
 * sequencer is available), then register the card with ALSA.
 * Returns 0 on success or a negative errno; on failure the partially
 * built card is freed.
 */
static int snd_card_emu10k1_probe(struct pci_dev *pci,
				  const struct pci_device_id *pci_id)
{
	static int dev;
	struct snd_card *card;
	struct snd_emu10k1 *emu;
#ifdef ENABLE_SYNTH
	struct snd_seq_device *wave = NULL;
#endif
	int err;

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
			   0, &card);
	if (err < 0)
		return err;

	/* Clamp the requested sample buffer size to the supported range. */
	if (max_buffer_size[dev] < 32)
		max_buffer_size[dev] = 32;
	else if (max_buffer_size[dev] > 1024)
		max_buffer_size[dev] = 1024;

	err = snd_emu10k1_create(card, pci, extin[dev], extout[dev],
				 (long)max_buffer_size[dev] * 1024 * 1024,
				 enable_ir[dev], subsystem[dev], &emu);
	if (err < 0)
		goto error;
	card->private_data = emu;
	emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f;

	err = snd_emu10k1_pcm(emu, 0, NULL);
	if (err < 0)
		goto error;
	err = snd_emu10k1_pcm_mic(emu, 1, NULL);
	if (err < 0)
		goto error;
	err = snd_emu10k1_pcm_efx(emu, 2, NULL);
	if (err < 0)
		goto error;

	/* This stores the periods table. */
	if (emu->card_capabilities->ca0151_chip) { /* P16V */
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  snd_dma_pci_data(pci), 1024,
					  &emu->p16v_buffer);
		if (err < 0)
			goto error;
	}

	err = snd_emu10k1_mixer(emu, 0, 3);
	if (err < 0)
		goto error;
	err = snd_emu10k1_timer(emu, 0);
	if (err < 0)
		goto error;
	err = snd_emu10k1_pcm_multi(emu, 3, NULL);
	if (err < 0)
		goto error;
	if (emu->card_capabilities->ca0151_chip) { /* P16V */
		err = snd_p16v_pcm(emu, 4, NULL);
		if (err < 0)
			goto error;
	}
	if (emu->audigy)
		err = snd_emu10k1_audigy_midi(emu);
	else
		err = snd_emu10k1_midi(emu);
	if (err < 0)
		goto error;
	err = snd_emu10k1_fx8010_new(emu, 0, NULL);
	if (err < 0)
		goto error;

#ifdef ENABLE_SYNTH
	/* The synth is optional: failure only warns, it does not abort. */
	if (snd_seq_device_new(card, 1, SNDRV_SEQ_DEV_ID_EMU10K1_SYNTH,
			       sizeof(struct snd_emu10k1_synth_arg),
			       &wave) < 0 ||
	    wave == NULL) {
		dev_warn(emu->card->dev,
			 "can't initialize Emu10k1 wavetable synth\n");
	} else {
		struct snd_emu10k1_synth_arg *arg;
		arg = SNDRV_SEQ_DEVICE_ARGPTR(wave);
		strcpy(wave->name, "Emu-10k1 Synth");
		arg->hwptr = emu;
		arg->index = 1;
		arg->seq_ports = seq_ports[dev];
		arg->max_voices = max_synth_voices[dev];
	}
#endif

	strlcpy(card->driver, emu->card_capabilities->driver,
		sizeof(card->driver));
	strlcpy(card->shortname, emu->card_capabilities->name,
		sizeof(card->shortname));
	snprintf(card->longname, sizeof(card->longname),
		 "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
		 card->shortname, emu->revision, emu->serial, emu->port,
		 emu->irq);

	err = snd_card_register(card);
	if (err < 0)
		goto error;

	pci_set_drvdata(pci, card);
	dev++;
	return 0;

 error:
	snd_card_free(card);
	return err;
}

/* Release everything allocated by the probe via the card destructor. */
static void snd_card_emu10k1_remove(struct pci_dev *pci)
{
	snd_card_free(pci_get_drvdata(pci));
}

#ifdef CONFIG_PM_SLEEP
/* Quiesce all PCM streams and hardware state, then power the PCI device down. */
static int snd_emu10k1_suspend(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	struct snd_card *card = dev_get_drvdata(dev);
	struct snd_emu10k1 *emu = card->private_data;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);

	emu->suspend = 1;

	snd_pcm_suspend_all(emu->pcm);
	snd_pcm_suspend_all(emu->pcm_mic);
	snd_pcm_suspend_all(emu->pcm_efx);
	snd_pcm_suspend_all(emu->pcm_multi);
	snd_pcm_suspend_all(emu->pcm_p16v);

	snd_ac97_suspend(emu->ac97);

	snd_emu10k1_efx_suspend(emu);
	snd_emu10k1_suspend_regs(emu);
	if (emu->card_capabilities->ca0151_chip)
		snd_p16v_suspend(emu);

	snd_emu10k1_done(emu);

	pci_disable_device(pci);
	pci_save_state(pci);
	pci_set_power_state(pci, PCI_D3hot);
	return 0;
}

/* Re-power the PCI device and restore all hardware/DSP state. */
static int snd_emu10k1_resume(struct device *dev)
{
	struct pci_dev *pci = to_pci_dev(dev);
	struct snd_card *card = dev_get_drvdata(dev);
	struct snd_emu10k1 *emu = card->private_data;

	pci_set_power_state(pci, PCI_D0);
	pci_restore_state(pci);
	if (pci_enable_device(pci) < 0) {
		dev_err(dev, "pci_enable_device failed, disabling device\n");
		snd_card_disconnect(card);
		return -EIO;
	}
	pci_set_master(pci);

	snd_emu10k1_resume_init(emu);
	snd_emu10k1_efx_resume(emu);
	snd_ac97_resume(emu->ac97);
	snd_emu10k1_resume_regs(emu);

	if (emu->card_capabilities->ca0151_chip)
		snd_p16v_resume(emu);

	emu->suspend = 0;

	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}

static SIMPLE_DEV_PM_OPS(snd_emu10k1_pm, snd_emu10k1_suspend, snd_emu10k1_resume);
#define SND_EMU10K1_PM_OPS	&snd_emu10k1_pm
#else
#define SND_EMU10K1_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct pci_driver emu10k1_driver = {
	.name = KBUILD_MODNAME,
	.id_table = snd_emu10k1_ids,
	.probe = snd_card_emu10k1_probe,
	.remove = snd_card_emu10k1_remove,
	.driver = {
		.pm = SND_EMU10K1_PM_OPS,
	},
};

module_pci_driver(emu10k1_driver);
gpl-2.0
visi0nary/mediatek
mt6732/kernel/kernel/sched/debug.c
5
30632
/* * kernel/sched/debug.c * * Print the CFS rbtree * * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/kallsyms.h> #include <linux/utsname.h> #ifdef CONFIG_KGDB_KDB #include <linux/kdb.h> #endif #include "sched.h" //#define TEST_SCHED_DEBUG_ENHANCEMENT //#define MTK_SCHED_CMP_PRINT #define TRYLOCK_NUM 10 #include <linux/delay.h> static DEFINE_SPINLOCK(sched_debug_lock); /* * This allows printing both to /proc/sched_debug and * to the console */ #ifndef CONFIG_KGDB_KDB #define SEQ_printf(m, x...) \ do { \ if (m) \ seq_printf(m, x); \ else \ printk(x); \ } while (0) #else #define SEQ_printf(m, x...) \ do { \ if (m) \ seq_printf(m, x); \ else if (__get_cpu_var(kdb_in_use) == 1) \ kdb_printf(x); \ else \ printk(x); \ } while (0) #endif /* * Ease the printing of nsec fields: */ static long long nsec_high(unsigned long long nsec) { if ((long long)nsec < 0) { nsec = -nsec; do_div(nsec, 1000000); return -nsec; } do_div(nsec, 1000000); return nsec; } static unsigned long nsec_low(unsigned long long nsec) { if ((long long)nsec < 0) nsec = -nsec; return do_div(nsec, 1000000); } #define SPLIT_NS(x) nsec_high(x), nsec_low(x) #ifdef CONFIG_FAIR_GROUP_SCHED static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg) { struct sched_entity *se = tg->se[cpu]; #define P(F) \ SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F) #define PN(F) \ SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F)) if (!se) { struct sched_avg *avg = &cpu_rq(cpu)->avg; P(avg->runnable_avg_sum); P(avg->runnable_avg_period); #ifdef MTK_SCHED_CMP_PRINT # ifdef CONFIG_MTK_SCHED_CMP /* usage_avg_sum & load_avg_ratio are based on Linaro 12.11 */ P(avg->usage_avg_sum); P(avg->load_avg_ratio); # 
endif P(avg->last_runnable_update); #endif return; } PN(se->exec_start); PN(se->vruntime); PN(se->sum_exec_runtime); #ifdef CONFIG_SCHEDSTATS PN(se->statistics.wait_start); PN(se->statistics.sleep_start); PN(se->statistics.block_start); PN(se->statistics.sleep_max); PN(se->statistics.block_max); PN(se->statistics.exec_max); PN(se->statistics.slice_max); PN(se->statistics.wait_max); PN(se->statistics.wait_sum); P(se->statistics.wait_count); #endif P(se->load.weight); #ifdef CONFIG_SMP P(se->avg.runnable_avg_sum); P(se->avg.runnable_avg_period); P(se->avg.usage_avg_sum); P(se->avg.load_avg_contrib); P(se->avg.decay_count); # ifdef MTK_SCHED_CMP_PRINT # ifdef CONFIG_MTK_SCHED_CMP /* usage_avg_sum & load_avg_ratio are based on Linaro 12.11 */ P(se->avg.usage_avg_sum); P(se->avg.load_avg_ratio); # endif P(se->avg.last_runnable_update); # endif #endif #undef PN #undef P } #endif #ifdef CONFIG_CGROUP_SCHED static char group_path[PATH_MAX]; static char *task_group_path(struct task_group *tg) { if (autogroup_path(tg, group_path, PATH_MAX)) return group_path; cgroup_path(tg->css.cgroup, group_path, PATH_MAX); return group_path; } #endif static void print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) { if (rq->curr == p) SEQ_printf(m, "R"); else SEQ_printf(m, " "); SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", p->comm, p->pid, SPLIT_NS(p->se.vruntime), (long long)(p->nvcsw + p->nivcsw), p->prio); #ifdef CONFIG_SCHEDSTATS SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", SPLIT_NS(p->se.vruntime), SPLIT_NS(p->se.sum_exec_runtime), SPLIT_NS(p->se.statistics.sum_sleep_runtime)); #else SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); #endif #ifdef CONFIG_CGROUP_SCHED SEQ_printf(m, " %s", task_group_path(task_group(p))); #endif SEQ_printf(m, "\n"); } static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) { struct task_struct *g, *p; unsigned long flags; SEQ_printf(m, "\nrunnable tasks:\n" " task 
PID tree-key switches prio" " exec-runtime sum-exec sum-sleep\n" "------------------------------------------------------" "----------------------------------------------------\n"); read_lock_irqsave(&tasklist_lock, flags); do_each_thread(g, p) { if (!p->on_rq || task_cpu(p) != rq_cpu) continue; print_task(m, rq, p); } while_each_thread(g, p); read_unlock_irqrestore(&tasklist_lock, flags); } void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) { s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, spread, rq0_min_vruntime, spread0; struct rq *rq = cpu_rq(cpu); struct sched_entity *last; unsigned long flags; #ifdef CONFIG_FAIR_GROUP_SCHED SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg)); #else SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); #endif SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", SPLIT_NS(cfs_rq->exec_clock)); raw_spin_lock_irqsave(&rq->lock, flags); if (cfs_rq->rb_leftmost) MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime; last = __pick_last_entity(cfs_rq); if (last) max_vruntime = last->vruntime; min_vruntime = cfs_rq->min_vruntime; rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; raw_spin_unlock_irqrestore(&rq->lock, flags); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", SPLIT_NS(MIN_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", SPLIT_NS(min_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime", SPLIT_NS(max_vruntime)); spread = max_vruntime - MIN_vruntime; SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread)); spread0 = min_vruntime - rq0_min_vruntime; SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0", SPLIT_NS(spread0)); SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over", cfs_rq->nr_spread_over); SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); #ifdef CONFIG_SMP SEQ_printf(m, " .%-30s: %ld\n", "runnable_load_avg", cfs_rq->runnable_load_avg); SEQ_printf(m, " .%-30s: %ld\n", 
"blocked_load_avg", cfs_rq->blocked_load_avg); #ifdef CONFIG_FAIR_GROUP_SCHED SEQ_printf(m, " .%-30s: %ld\n", "tg_load_contrib", cfs_rq->tg_load_contrib); SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib", cfs_rq->tg_runnable_contrib); SEQ_printf(m, " .%-30s: %ld\n", "tg->load_avg", atomic_long_read(&cfs_rq->tg->load_avg)); SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg", atomic_read(&cfs_rq->tg->runnable_avg)); SEQ_printf(m, " .%-30s: %d\n", "tg->usage_avg", atomic_read(&cfs_rq->tg->usage_avg)); #endif #ifdef CONFIG_CFS_BANDWIDTH SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active", cfs_rq->tg->cfs_bandwidth.timer_active); SEQ_printf(m, " .%-30s: %d\n", "throttled", cfs_rq->throttled); SEQ_printf(m, " .%-30s: %d\n", "throttle_count", cfs_rq->throttle_count); #endif #ifdef CONFIG_FAIR_GROUP_SCHED print_cfs_group_stats(m, cpu, cfs_rq->tg); #endif #endif } void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) { #ifdef CONFIG_RT_GROUP_SCHED SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg)); #else SEQ_printf(m, "\nrt_rq[%d]:\n", cpu); #endif #define P(x) \ SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x)) #define PN(x) \ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x)) P(rt_nr_running); P(rt_throttled); PN(rt_time); PN(rt_runtime); #undef PN #undef P } extern __read_mostly int sched_clock_running; static void print_cpu(struct seq_file *m, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; #ifdef CONFIG_X86 { unsigned int freq = cpu_khz ? 
: 1; SEQ_printf(m, "cpu#%d, %u.%03u MHz\n", cpu, freq / 1000, (freq % 1000)); } #else SEQ_printf(m, "cpu#%d: %s\n", cpu, cpu_is_offline(cpu)?"Offline":"Online"); #endif #define P(x) \ do { \ if (sizeof(rq->x) == 4) \ SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \ else \ SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\ } while (0) #define PN(x) \ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x)) P(nr_running); SEQ_printf(m, " .%-30s: %lu\n", "load", rq->load.weight); P(nr_switches); P(nr_load_updates); P(nr_uninterruptible); PN(next_balance); P(curr->pid); PN(clock); P(cpu_load[0]); P(cpu_load[1]); P(cpu_load[2]); P(cpu_load[3]); P(cpu_load[4]); #undef P #undef PN #ifdef CONFIG_SCHEDSTATS #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); #define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n); P(yld_count); P(sched_count); P(sched_goidle); #ifdef CONFIG_SMP P64(avg_idle); #endif P(ttwu_count); P(ttwu_local); #undef P #undef P64 #endif spin_lock_irqsave(&sched_debug_lock, flags); print_cfs_stats(m, cpu); print_rt_stats(m, cpu); rcu_read_lock(); print_rq(m, rq, cpu); rcu_read_unlock(); spin_unlock_irqrestore(&sched_debug_lock, flags); SEQ_printf(m, "\n"); } static const char *sched_tunable_scaling_names[] = { "none", "logaritmic", "linear" }; #ifdef TEST_SCHED_DEBUG_ENHANCEMENT extern void lock_timekeeper(void); #endif static void sched_debug_header(struct seq_file *m) { u64 ktime, sched_clk, cpu_clk; unsigned long flags; #ifdef TEST_SCHED_DEBUG_ENHANCEMENT static int i=0; i++; if(i==10){ struct rq *rq = cpu_rq(0); //lock_timekeeper(); raw_spin_lock_irq(&rq->lock); spin_lock_irqsave(&sched_debug_lock, flags); write_lock_irqsave(&tasklist_lock, flags); BUG_ON(1); } #endif local_irq_save(flags); ktime = ktime_to_ns(ktime_get()); sched_clk = sched_clock(); cpu_clk = local_clock(); local_irq_restore(flags); SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n", init_utsname()->release, (int)strcspn(init_utsname()->version, " "), 
init_utsname()->version); #define P(x) \ SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x)) #define PN(x) \ SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) PN(ktime); PN(sched_clk); PN(cpu_clk); P(jiffies); #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK P(sched_clock_stable); #endif #undef PN #undef P SEQ_printf(m, "\n"); SEQ_printf(m, "sysctl_sched\n"); #define P(x) \ SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x)) #define PN(x) \ SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) PN(sysctl_sched_latency); PN(sysctl_sched_min_granularity); PN(sysctl_sched_wakeup_granularity); P(sysctl_sched_child_runs_first); P(sysctl_sched_features); #undef PN #undef P SEQ_printf(m, " .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling", sysctl_sched_tunable_scaling, sched_tunable_scaling_names[sysctl_sched_tunable_scaling]); SEQ_printf(m, "\n"); } static int sched_debug_show(struct seq_file *m, void *v) { int cpu = (unsigned long)(v - 2); unsigned long flags; if (cpu != -1) { read_lock_irqsave(&tasklist_lock, flags); print_cpu(m, cpu); read_unlock_irqrestore(&tasklist_lock, flags); SEQ_printf(m, "\n"); } else sched_debug_header(m); return 0; } void sysrq_sched_debug_show(void) { int cpu; unsigned long flags; sched_debug_header(NULL); read_lock_irqsave(&tasklist_lock, flags); //for_each_online_cpu(cpu) for_each_possible_cpu(cpu) print_cpu(NULL, cpu); read_unlock_irqrestore(&tasklist_lock, flags); } /* * This itererator needs some explanation. * It returns 1 for the header position. * This means 2 is cpu 0. * In a hotplugged system some cpus, including cpu 0, may be missing so we have * to use cpumask_* to iterate over the cpus. 
*/ static void *sched_debug_start(struct seq_file *file, loff_t *offset) { unsigned long n = *offset; if (n == 0) return (void *) 1; n--; if (n > 0) n = cpumask_next(n - 1, cpu_online_mask); else n = cpumask_first(cpu_online_mask); *offset = n + 1; if (n < nr_cpu_ids) return (void *)(unsigned long)(n + 2); return NULL; } static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset) { (*offset)++; return sched_debug_start(file, offset); } static void sched_debug_stop(struct seq_file *file, void *data) { } static const struct seq_operations sched_debug_sops = { .start = sched_debug_start, .next = sched_debug_next, .stop = sched_debug_stop, .show = sched_debug_show, }; static int sched_debug_release(struct inode *inode, struct file *file) { seq_release(inode, file); return 0; } static int sched_debug_open(struct inode *inode, struct file *filp) { int ret = 0; ret = seq_open(filp, &sched_debug_sops); return ret; } static const struct file_operations sched_debug_fops = { .open = sched_debug_open, .read = seq_read, .llseek = seq_lseek, .release = sched_debug_release, }; static int __init init_sched_debug_procfs(void) { struct proc_dir_entry *pe; pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops); if (!pe) return -ENOMEM; return 0; } __initcall(init_sched_debug_procfs); void proc_sched_show_task(struct task_struct *p, struct seq_file *m) { unsigned long nr_switches; SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, get_nr_threads(p)); SEQ_printf(m, "---------------------------------------------------------\n"); #define __P(F) \ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F) #define P(F) \ SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F) #define __PN(F) \ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F)) #define PN(F) \ SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F)) PN(se.exec_start); PN(se.vruntime); PN(se.sum_exec_runtime); nr_switches = p->nvcsw + p->nivcsw; #ifdef CONFIG_SCHEDSTATS 
PN(se.statistics.wait_start); PN(se.statistics.sleep_start); PN(se.statistics.block_start); PN(se.statistics.sleep_max); PN(se.statistics.block_max); PN(se.statistics.exec_max); PN(se.statistics.slice_max); PN(se.statistics.wait_max); PN(se.statistics.wait_sum); P(se.statistics.wait_count); PN(se.statistics.iowait_sum); P(se.statistics.iowait_count); P(se.nr_migrations); P(se.statistics.nr_migrations_cold); P(se.statistics.nr_failed_migrations_affine); P(se.statistics.nr_failed_migrations_running); P(se.statistics.nr_failed_migrations_hot); P(se.statistics.nr_forced_migrations); P(se.statistics.nr_wakeups); P(se.statistics.nr_wakeups_sync); P(se.statistics.nr_wakeups_migrate); P(se.statistics.nr_wakeups_local); P(se.statistics.nr_wakeups_remote); P(se.statistics.nr_wakeups_affine); P(se.statistics.nr_wakeups_affine_attempts); P(se.statistics.nr_wakeups_passive); P(se.statistics.nr_wakeups_idle); { u64 avg_atom, avg_per_cpu; avg_atom = p->se.sum_exec_runtime; if (nr_switches) do_div(avg_atom, nr_switches); else avg_atom = -1LL; avg_per_cpu = p->se.sum_exec_runtime; if (p->se.nr_migrations) { avg_per_cpu = div64_u64(avg_per_cpu, p->se.nr_migrations); } else { avg_per_cpu = -1LL; } __PN(avg_atom); __PN(avg_per_cpu); } #endif __P(nr_switches); SEQ_printf(m, "%-35s:%21Ld\n", "nr_voluntary_switches", (long long)p->nvcsw); SEQ_printf(m, "%-35s:%21Ld\n", "nr_involuntary_switches", (long long)p->nivcsw); P(se.load.weight); #ifdef CONFIG_SMP P(se.avg.runnable_avg_sum); P(se.avg.runnable_avg_period); P(se.avg.load_avg_contrib); P(se.avg.decay_count); # ifdef MTK_SCHED_CMP_PRINT # ifdef CONFIG_MTK_SCHED_CMP /* usage_avg_sum & load_avg_ratio are based on Linaro 12.11 */ P(se.avg.usage_avg_sum); P(se.avg.load_avg_ratio); # endif P(se.avg.last_runnable_update); # endif #endif P(policy); P(prio); #undef PN #undef __PN #undef P #undef __P { unsigned int this_cpu = raw_smp_processor_id(); u64 t0, t1; t0 = cpu_clock(this_cpu); t1 = cpu_clock(this_cpu); SEQ_printf(m, "%-35s:%21Ld\n", 
"clock-delta", (long long)(t1-t0)); } } void proc_sched_set_task(struct task_struct *p) { #ifdef CONFIG_SCHEDSTATS memset(&p->se.statistics, 0, sizeof(p->se.statistics)); #endif } #define read_trylock_irqsave(lock, flags) \ ({ \ typecheck(unsigned long, flags); \ local_irq_save(flags); \ read_trylock(lock)? \ 1 : ({ local_irq_restore(flags); 0; }); \ }) int read_trylock_n_irqsave(rwlock_t *lock, unsigned long *flags, struct seq_file *m, char *msg){ int locked, trylock_cnt=0; do{ locked = read_trylock_irqsave(lock, *flags); trylock_cnt++; mdelay(10); }while((!locked) && (trylock_cnt < TRYLOCK_NUM)); if (!locked){ #ifdef CONFIG_DEBUG_SPINLOCK struct task_struct *owner = NULL; #endif SEQ_printf(m, "Warning: fail to get lock in %s\n", msg); #ifdef CONFIG_DEBUG_SPINLOCK if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT ) owner = lock->owner; SEQ_printf(m, " lock: %p, .magic: %08x, .owner: %s/%d, " ".owner_cpu: %d, value: %d\n", lock, lock->magic, owner ? owner-> comm: "<<none>>", owner ? task_pid_nr(owner): -1, lock->owner_cpu, lock->raw_lock.lock); #endif } return locked; } int raw_spin_trylock_n_irqsave(raw_spinlock_t *lock, unsigned long *flags, struct seq_file *m, char *msg){ int locked, trylock_cnt=0; do{ locked = raw_spin_trylock_irqsave(lock, *flags); trylock_cnt++; mdelay(10); }while((!locked) && (trylock_cnt < TRYLOCK_NUM)); if (!locked){ #ifdef CONFIG_DEBUG_SPINLOCK struct task_struct *owner = NULL; #endif SEQ_printf(m, "Warning: fail to get lock in %s\n", msg); #ifdef CONFIG_DEBUG_SPINLOCK if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT ) owner = lock->owner; SEQ_printf(m, " lock: %x, .magic: %08x, .owner: %s/%d, " ".owner_cpu: %d, value: %d\n", (int)lock, lock->magic, owner ? owner-> comm: "<<none>>", owner ? 
task_pid_nr(owner): -1, lock->owner_cpu, lock->raw_lock.slock); #endif } return locked; } int spin_trylock_n_irqsave(spinlock_t *lock, unsigned long *flags, struct seq_file *m, char *msg){ int locked, trylock_cnt=0; do{ locked = spin_trylock_irqsave(lock, *flags); trylock_cnt++; mdelay(10); }while((!locked) && (trylock_cnt < TRYLOCK_NUM)); if (!locked){ #ifdef CONFIG_DEBUG_SPINLOCK raw_spinlock_t rlock = lock->rlock; struct task_struct *owner = NULL; #endif SEQ_printf(m, "Warning: fail to get lock in %s\n", msg); #ifdef CONFIG_DEBUG_SPINLOCK if (rlock.owner && rlock.owner != SPINLOCK_OWNER_INIT ) owner = rlock.owner; SEQ_printf(m, " lock: %x, .magic: %08x, .owner: %s/%d, " ".owner_cpu: %d, value: %d\n", (int) &rlock, rlock.magic, owner ? owner-> comm: "<<none>>", owner ? task_pid_nr(owner): -1, rlock.owner_cpu, rlock.raw_lock.slock); #endif } return locked; } void print_rq_at_KE(struct seq_file *m, struct rq *rq, int rq_cpu) { struct task_struct *g, *p; unsigned long flags; int locked; SEQ_printf(m, "runnable tasks:\n" " task PID tree-key switches prio" " exec-runtime sum-exec sum-sleep\n" "------------------------------------------------------" "----------------------------------------------------\n"); //read_lock_irqsave(&tasklist_lock, flags); locked = read_trylock_n_irqsave(&tasklist_lock, &flags, m, "print_rq_at_KE"); do_each_thread(g, p) { if (!p->on_rq || task_cpu(p) != rq_cpu) continue; print_task(m, rq, p); } while_each_thread(g, p); if (locked) read_unlock_irqrestore(&tasklist_lock, flags); } #ifdef CONFIG_FAIR_GROUP_SCHED static void print_cfs_group_stats_at_KE(struct seq_file *m, int cpu, struct task_group *tg) { struct sched_entity *se = tg->se[cpu]; #define P(F) \ SEQ_printf(m, " .%-22s: %lld\n", #F, (long long)F) #define PN(F) \ SEQ_printf(m, " .%-22s: %lld.%06ld\n", #F, SPLIT_NS((long long)F)) if (!se) { struct sched_avg *avg = &cpu_rq(cpu)->avg; P(avg->runnable_avg_sum); P(avg->runnable_avg_period); #ifdef MTK_SCHED_CMP_PRINT # ifdef 
CONFIG_MTK_SCHED_CMP /* usage_avg_sum & load_avg_ratio are based on Linaro 12.11 */ P(avg->usage_avg_sum); P(avg->load_avg_ratio); # endif P(avg->last_runnable_update); #endif return; } PN(se->exec_start); PN(se->vruntime); PN(se->sum_exec_runtime); P(se->load.weight); #ifdef CONFIG_SMP P(se->avg.runnable_avg_sum); P(se->avg.runnable_avg_period); P(se->avg.usage_avg_sum); P(se->avg.load_avg_contrib); P(se->avg.decay_count); # ifdef MTK_SCHED_CMP_PRINT # ifdef CONFIG_MTK_SCHED_CMP /* usage_avg_sum & load_avg_ratio are based on Linaro 12.11 */ P(se->avg.usage_avg_sum); P(se->avg.load_avg_ratio); # endif P(se->avg.last_runnable_update); # endif #endif #undef PN #undef P } #endif void print_cfs_rq_at_KE(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) { s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, spread, rq0_min_vruntime, spread0; struct rq *rq = cpu_rq(cpu); struct sched_entity *last; unsigned long flags; int locked; #ifdef CONFIG_FAIR_GROUP_SCHED SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg)); #else SEQ_printf(m, "cfs_rq[%d]:\n", cpu); #endif SEQ_printf(m, " .%-22s: %Ld.%06ld\n", "exec_clock", SPLIT_NS(cfs_rq->exec_clock)); //raw_spin_lock_irqsave(&rq->lock, flags); locked = raw_spin_trylock_n_irqsave(&rq->lock, &flags, m, "print_cfs_rq_at_KE"); if (cfs_rq->rb_leftmost) MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime; last = __pick_last_entity(cfs_rq); if (last) max_vruntime = last->vruntime; min_vruntime = cfs_rq->min_vruntime; rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; if(locked) raw_spin_unlock_irqrestore(&rq->lock, flags); SEQ_printf(m, " .%-22s: %Ld.%06ld\n", "MIN_vruntime", SPLIT_NS(MIN_vruntime)); SEQ_printf(m, " .%-22s: %Ld.%06ld\n", "min_vruntime", SPLIT_NS(min_vruntime)); SEQ_printf(m, " .%-22s: %Ld.%06ld\n", "max_vruntime", SPLIT_NS(max_vruntime)); spread = max_vruntime - MIN_vruntime; SEQ_printf(m, " .%-22s: %Ld.%06ld\n", "spread", SPLIT_NS(spread)); spread0 = min_vruntime - rq0_min_vruntime; SEQ_printf(m, " 
.%-22s: %Ld.%06ld\n", "spread0", SPLIT_NS(spread0)); SEQ_printf(m, " .%-22s: %d\n", "nr_spread_over", cfs_rq->nr_spread_over); SEQ_printf(m, " .%-22s: %d\n", "nr_running", cfs_rq->nr_running); SEQ_printf(m, " .%-22s: %ld\n", "load", cfs_rq->load.weight); #ifdef CONFIG_SMP SEQ_printf(m, " .%-22s: %ld\n", "runnable_load_avg", cfs_rq->runnable_load_avg); SEQ_printf(m, " .%-22s: %ld\n", "blocked_load_avg", cfs_rq->blocked_load_avg); # ifdef CONFIG_FAIR_GROUP_SCHED SEQ_printf(m, " .%-22s: %ld\n", "tg_load_contrib", cfs_rq->tg_load_contrib); SEQ_printf(m, " .%-22s: %d\n", "tg_runnable_contrib", cfs_rq->tg_runnable_contrib); SEQ_printf(m, " .%-22s: %ld\n", "tg->load_avg", atomic_long_read(&cfs_rq->tg->load_avg)); SEQ_printf(m, " .%-22s: %d\n", "tg->runnable_avg", atomic_read(&cfs_rq->tg->runnable_avg)); # endif #endif #ifdef CONFIG_FAIR_GROUP_SCHED print_cfs_group_stats_at_KE(m, cpu, cfs_rq->tg); #endif } #define for_each_leaf_cfs_rq(rq, cfs_rq) \ list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) void print_cfs_stats_at_KE(struct seq_file *m, int cpu) { struct cfs_rq *cfs_rq; rcu_read_lock(); for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) print_cfs_rq_at_KE(m, cpu, cfs_rq); rcu_read_unlock(); } void print_rt_rq_at_KE(struct seq_file *m, int cpu, struct rt_rq *rt_rq) { #ifdef CONFIG_RT_GROUP_SCHED SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg)); #else SEQ_printf(m, "rt_rq[%d]:\n", cpu); #endif #define P(x) \ SEQ_printf(m, " .%-22s: %Ld\n", #x, (long long)(rt_rq->x)) #define PN(x) \ SEQ_printf(m, " .%-22s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x)) P(rt_nr_running); P(rt_throttled); PN(rt_time); PN(rt_runtime); #undef PN #undef P } #ifdef CONFIG_RT_GROUP_SCHED typedef struct task_group *rt_rq_iter_t; static inline struct task_group *next_task_group(struct task_group *tg) { do { tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list); } while (&tg->list != &task_groups && task_group_is_autogroup(tg)); if (&tg->list == &task_groups) 
tg = NULL; return tg; } #define for_each_rt_rq(rt_rq, iter, rq) \ for (iter = container_of(&task_groups, typeof(*iter), list); \ (iter = next_task_group(iter)) && \ (rt_rq = iter->rt_rq[cpu_of(rq)]);) #else /* !CONFIG_RT_GROUP_SCHED */ typedef struct rt_rq *rt_rq_iter_t; #define for_each_rt_rq(rt_rq, iter, rq) \ for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) #endif void print_rt_stats_at_KE(struct seq_file *m, int cpu) { rt_rq_iter_t iter; struct rt_rq *rt_rq; rcu_read_lock(); for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) print_rt_rq_at_KE(m, cpu, rt_rq); rcu_read_unlock(); } static void print_cpu_at_KE(struct seq_file *m, int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long flags; int locked; #ifdef CONFIG_X86 { unsigned int freq = cpu_khz ? : 1; SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n", cpu, freq / 1000, (freq % 1000)); } #else SEQ_printf(m, "cpu#%d: %s\n", cpu, cpu_is_offline(cpu)?"Offline":"Online"); #endif #define P(x) \ do { \ if (sizeof(rq->x) == 4) \ SEQ_printf(m, " .%-22s: %ld\n", #x, (long)(rq->x)); \ else \ SEQ_printf(m, " .%-22s: %Ld\n", #x, (long long)(rq->x));\ } while (0) #define PN(x) \ SEQ_printf(m, " .%-22s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x)) P(nr_running); SEQ_printf(m, " .%-22s: %lu\n", "load", rq->load.weight); P(nr_switches); P(nr_load_updates); P(nr_uninterruptible); PN(next_balance); P(curr->pid); PN(clock); P(cpu_load[0]); P(cpu_load[1]); P(cpu_load[2]); P(cpu_load[3]); P(cpu_load[4]); #undef P #undef PN #ifdef CONFIG_SCHEDSTATS #define P(n) SEQ_printf(m, " .%-22s: %d\n", #n, rq->n); #define P64(n) SEQ_printf(m, " .%-22s: %Ld\n", #n, rq->n); P(yld_count); P(sched_count); P(sched_goidle); #ifdef CONFIG_SMP P64(avg_idle); #endif P(ttwu_count); P(ttwu_local); #undef P #undef P64 #endif //spin_lock_irqsave(&sched_debug_lock, flags); locked = spin_trylock_n_irqsave( &sched_debug_lock, &flags, m, "print_cpu_at_KE"); print_cfs_stats_at_KE(m, cpu); print_rt_stats_at_KE(m, cpu); rcu_read_lock(); print_rq_at_KE(m, rq, cpu); SEQ_printf(m, 
"======================================================" "====================================================\n"); rcu_read_unlock(); if (locked) spin_unlock_irqrestore(&sched_debug_lock, flags); } static void sched_debug_header_at_KE(struct seq_file *m) { u64 ktime=0, sched_clk, cpu_clk; unsigned long flags; local_irq_save(flags); // ktime = ktime_to_ns(ktime_get()); sched_clk = sched_clock(); cpu_clk = local_clock(); local_irq_restore(flags); SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n", init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); #define P(x) \ SEQ_printf(m, "%-22s: %Ld\n", #x, (long long)(x)) #define PN(x) \ SEQ_printf(m, "%-22s: %Ld.%06ld\n", #x, SPLIT_NS(x)) PN(ktime); PN(sched_clk); PN(cpu_clk); P(jiffies); #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK P(sched_clock_stable); #endif #undef PN #undef P //SEQ_printf(m, "\n"); SEQ_printf(m, "sysctl_sched\n"); #define P(x) \ SEQ_printf(m, " .%-35s: %Ld\n", #x, (long long)(x)) #define PN(x) \ SEQ_printf(m, " .%-35s: %Ld.%06ld\n", #x, SPLIT_NS(x)) PN(sysctl_sched_latency); PN(sysctl_sched_min_granularity); PN(sysctl_sched_wakeup_granularity); P(sysctl_sched_child_runs_first); P(sysctl_sched_features); #undef PN #undef P SEQ_printf(m, " .%-35s: %d (%s)\n", "sysctl_sched_tunable_scaling", sysctl_sched_tunable_scaling, sched_tunable_scaling_names[sysctl_sched_tunable_scaling]); SEQ_printf(m, "\n"); } void sysrq_sched_debug_show_at_KE(void) { int cpu; unsigned long flags; int locked; sched_debug_header_at_KE(NULL); //read_lock_irqsave(&tasklist_lock, flags); locked = read_trylock_n_irqsave(&tasklist_lock, &flags, NULL, "sched_debug_show_at_KE"); //for_each_online_cpu(cpu) for_each_possible_cpu(cpu) print_cpu_at_KE(NULL, cpu); if (locked) read_unlock_irqrestore(&tasklist_lock, flags); } #ifdef CONFIG_MET_SCHED_HMP /* MET */ #include <linux/export.h> #include <linux/met_drv.h> static char header[] = "met-info [000] 0.0: ms_ud_sys_header: TaskTh,B->th,L->th,d,d\n" 
"met-info [000] 0.0: ms_ud_sys_header: HmpStat,force_up,force_down,d,d\n" "met-info [000] 0.0: ms_ud_sys_header: HmpLoad,big_load_avg,little_load_avg,d,d\n" "met-info [000] 0.0: ms_ud_sys_header: RqLen,rq0,rq1,rq2,rq3,d,d,d,d\n" "met-info [000] 0.0: ms_ud_sys_header: CfsLen,cfs_rq0,cfs_rq1,cfs_rq2,cfs_rq3,d,d,d,d\n" "met-info [000] 0.0: ms_ud_sys_header: RtLen,rt_rq0,rt_rq1,rt_rq2,rt_rq3,d,d,d,d\n"; static char help[] = " --met_hmp_cfs monitor hmp_cfs\n"; static int sample_print_help(char *buf, int len) { return snprintf(buf, PAGE_SIZE, help); } static int sample_print_header(char *buf, int len) { return snprintf(buf, PAGE_SIZE, header); } unsigned int mt_cfs_dbg=0; static void sample_start(void) { mt_cfs_dbg=1; return; } static void sample_stop(void) { mt_cfs_dbg=0; return; } struct metdevice met_hmp_cfs = { .name = "hmp_cfs", .owner = THIS_MODULE, .type = MET_TYPE_BUS, .start = sample_start, .stop = sample_stop, .print_help = sample_print_help, .print_header = sample_print_header, }; EXPORT_SYMBOL(met_hmp_cfs); void TaskTh(unsigned int B_th,unsigned int L_th){ if(mt_cfs_dbg) trace_printk("%d,%d\n",B_th,L_th); } void HmpStat(struct hmp_statisic *hmp_stats){ if(mt_cfs_dbg) trace_printk("%d,%d\n",hmp_stats->nr_force_up,hmp_stats->nr_force_down); } void HmpLoad(int big_load_avg, int little_load_avg){ if(mt_cfs_dbg) trace_printk("%d,%d\n",big_load_avg,little_load_avg); } static DEFINE_PER_CPU(unsigned int, cfsrqCnt); static DEFINE_PER_CPU(unsigned int, rtrqCnt); static DEFINE_PER_CPU(unsigned int, rqCnt); void RqLen(int cpu, int length){ if(mt_cfs_dbg){ per_cpu(rqCnt, cpu) = length; #if NR_CPUS == 4 trace_printk("%d,%d,%d,%d\n",per_cpu(rqCnt,0),per_cpu(rqCnt,1),per_cpu(rqCnt,2),per_cpu(rqCnt,3)); #endif } } void CfsLen(int cpu, int length){ if(mt_cfs_dbg){ per_cpu(cfsrqCnt, cpu) = length; #if NR_CPUS == 4 trace_printk("%d,%d,%d,%d\n",per_cpu(cfsrqCnt,0),per_cpu(cfsrqCnt,1),per_cpu(cfsrqCnt,2),per_cpu(cfsrqCnt,3)); #endif } } void RtLen(int cpu, int length){ 
if(mt_cfs_dbg){ per_cpu(rtrqCnt, cpu) = length; #if NR_CPUS == 4 trace_printk("%d,%d,%d,%d\n",per_cpu(rtrqCnt,0),per_cpu(rtrqCnt,1),per_cpu(rtrqCnt,2),per_cpu(rtrqCnt,3)); #endif } } #endif
gpl-2.0
swarmnyc/gst-plugins-bad
ext/opencv/gstfaceblur.cpp
5
13833
/* * GStreamer * Copyright (C) 2005 Thomas Vander Stichele <thomas@apestaart.org> * Copyright (C) 2005 Ronald S. Bultje <rbultje@ronald.bitfreak.net> * Copyright (C) 2008 Michael Sheldon <mike@mikeasoft.com> * Copyright (C) 2011 Robert Jobbagy <jobbagy.robert@gmail.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Alternatively, the contents of this file may be used under the * GNU Lesser General Public License Version 2.1 (the "LGPL"), in * which case the following provisions apply instead of the ones * mentioned above: * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ /** * SECTION:element-faceblur * * Blurs faces in images and videos. * * <refsect2> * <title>Example launch line</title> * |[ * gst-launch-1.0 autovideosrc ! videoconvert ! faceblur ! videoconvert ! autovideosink * ]| * </refsect2> */ #ifdef HAVE_CONFIG_H # include <config.h> #endif #include <gst/gst.h> #include <vector> #include "gstopencvutils.h" #include "gstfaceblur.h" #include <opencv2/imgproc/imgproc_c.h> #include <opencv2/imgproc/imgproc.hpp> GST_DEBUG_CATEGORY_STATIC (gst_face_blur_debug); #define GST_CAT_DEFAULT gst_face_blur_debug #define DEFAULT_PROFILE OPENCV_PREFIX G_DIR_SEPARATOR_S "share" \ G_DIR_SEPARATOR_S OPENCV_PATH_NAME G_DIR_SEPARATOR_S "haarcascades" \ G_DIR_SEPARATOR_S "haarcascade_frontalface_default.xml" #define DEFAULT_SCALE_FACTOR 1.25 #define DEFAULT_FLAGS CV_HAAR_DO_CANNY_PRUNING #define DEFAULT_MIN_NEIGHBORS 3 #define DEFAULT_MIN_SIZE_WIDTH 30 #define DEFAULT_MIN_SIZE_HEIGHT 30 using namespace cv; enum { PROP_0, PROP_PROFILE, PROP_SCALE_FACTOR, PROP_MIN_NEIGHBORS, PROP_FLAGS, PROP_MIN_SIZE_WIDTH, PROP_MIN_SIZE_HEIGHT }; /** * GstOpencvFaceDetectFlags: * @GST_CAMERABIN_FLAG_SOURCE_RESIZE: enable video crop and scale * after capture * * Flags parameter to OpenCV's cvHaarDetectObjects function. 
*/
/* Flag bits accepted by the "flags" property; forwarded verbatim to
 * OpenCV's detectMultiScale(). */
typedef enum
{
  GST_OPENCV_FACE_BLUR_HAAR_DO_CANNY_PRUNING = (1 << 0)
} GstOpencvFaceBlurFlags;

#define GST_TYPE_OPENCV_FACE_BLUR_FLAGS (gst_opencv_face_blur_flags_get_type())

/* One-shot registration helper for the flags GType (invoked via g_once). */
static void
register_gst_opencv_face_blur_flags (GType * id)
{
  static const GFlagsValue values[] = {
    {(guint) GST_OPENCV_FACE_BLUR_HAAR_DO_CANNY_PRUNING,
          "Do Canny edge detection to discard some regions",
        "do-canny-pruning"},
    {0, NULL, NULL}
  };

  *id = g_flags_register_static ("GstOpencvFaceBlurFlags", values);
}

/* Lazily registers and returns the GType of the "flags" property.
 * Thread-safe: registration happens exactly once via GOnce. */
static GType
gst_opencv_face_blur_flags_get_type (void)
{
  static GType id;
  static GOnce once = G_ONCE_INIT;

  g_once (&once, (GThreadFunc) register_gst_opencv_face_blur_flags, &id);
  return id;
}

/* the capabilities of the inputs and outputs.
 * Only packed RGB is accepted; transform_ip converts to gray internally. */
static GstStaticPadTemplate sink_factory = GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGB"))
    );

static GstStaticPadTemplate src_factory = GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("RGB"))
    );

G_DEFINE_TYPE (GstFaceBlur, gst_face_blur, GST_TYPE_OPENCV_VIDEO_FILTER);

static void gst_face_blur_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec);
static void gst_face_blur_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec);

static gboolean gst_face_blur_set_caps (GstOpencvVideoFilter * transform,
    gint in_width, gint in_height, gint in_depth, gint in_channels,
    gint out_width, gint out_height, gint out_depth, gint out_channels);
static GstFlowReturn gst_face_blur_transform_ip (GstOpencvVideoFilter *
    transform, GstBuffer * buffer, IplImage * img);

static CascadeClassifier *gst_face_blur_load_profile (GstFaceBlur * filter,
    gchar * profile);

/* Clean up: release the grayscale scratch image, the cascade classifier
 * (heap-allocated with new in load_profile) and the profile path string. */
static void
gst_face_blur_finalize (GObject * obj)
{
  GstFaceBlur *filter = GST_FACE_BLUR (obj);

  if (filter->cvGray)
    cvReleaseImage (&filter->cvGray);
  if (filter->cvCascade)
    delete filter->cvCascade;

  g_free (filter->profile);

  G_OBJECT_CLASS (gst_face_blur_parent_class)->finalize (obj);
}

/* initialize the faceblur's class: hook up vmethods, install properties
 * and register pad templates / element metadata. */
static void
gst_face_blur_class_init (GstFaceBlurClass * klass)
{
  GObjectClass *gobject_class;
  GstOpencvVideoFilterClass *gstopencvbasefilter_class;
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);

  gobject_class = (GObjectClass *) klass;
  gstopencvbasefilter_class = (GstOpencvVideoFilterClass *) klass;

  gstopencvbasefilter_class->cv_trans_ip_func = gst_face_blur_transform_ip;
  gstopencvbasefilter_class->cv_set_caps = gst_face_blur_set_caps;
  gobject_class->finalize = GST_DEBUG_FUNCPTR (gst_face_blur_finalize);
  gobject_class->set_property = gst_face_blur_set_property;
  gobject_class->get_property = gst_face_blur_get_property;

  g_object_class_install_property (gobject_class, PROP_PROFILE,
      g_param_spec_string ("profile", "Profile",
          "Location of Haar cascade file to use for face blurion",
          DEFAULT_PROFILE,
          (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

  g_object_class_install_property (gobject_class, PROP_FLAGS,
      g_param_spec_flags ("flags", "Flags", "Flags to cvHaarDetectObjects",
          GST_TYPE_OPENCV_FACE_BLUR_FLAGS, DEFAULT_FLAGS,
          (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

  g_object_class_install_property (gobject_class, PROP_SCALE_FACTOR,
      g_param_spec_double ("scale-factor", "Scale factor",
          "Factor by which the windows is scaled after each scan",
          1.1, 10.0, DEFAULT_SCALE_FACTOR,
          (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

  g_object_class_install_property (gobject_class, PROP_MIN_NEIGHBORS,
      g_param_spec_int ("min-neighbors", "Mininum neighbors",
          "Minimum number (minus 1) of neighbor rectangles that makes up "
          "an object", 0, G_MAXINT, DEFAULT_MIN_NEIGHBORS,
          (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

  g_object_class_install_property (gobject_class, PROP_MIN_SIZE_WIDTH,
      g_param_spec_int ("min-size-width", "Minimum size width",
          "Minimum window width size", 0, G_MAXINT,
          DEFAULT_MIN_SIZE_WIDTH,
          (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

  g_object_class_install_property (gobject_class, PROP_MIN_SIZE_HEIGHT,
      g_param_spec_int ("min-size-height", "Minimum size height",
          "Minimum window height size", 0, G_MAXINT,
          DEFAULT_MIN_SIZE_HEIGHT,
          (GParamFlags) (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));

  gst_element_class_set_static_metadata (element_class,
      "faceblur",
      "Filter/Effect/Video",
      "Blurs faces in images and videos",
      "Michael Sheldon <mike@mikeasoft.com>,Robert Jobbagy <jobbagy.robert@gmail.com>");

  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&src_factory));
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&sink_factory));
}

/* initialize the new element
 * instantiate pads and add them to element
 * set pad callback functions
 * initialize instance structure
 * Note: the default cascade is loaded eagerly here; a load failure is
 * reported later, once, from transform_ip.
 */
static void
gst_face_blur_init (GstFaceBlur * filter)
{
  filter->profile = g_strdup (DEFAULT_PROFILE);
  filter->cvCascade = gst_face_blur_load_profile (filter, filter->profile);
  filter->sent_profile_load_failed_msg = FALSE;
  filter->scale_factor = DEFAULT_SCALE_FACTOR;
  filter->min_neighbors = DEFAULT_MIN_NEIGHBORS;
  filter->flags = DEFAULT_FLAGS;
  filter->min_size_width = DEFAULT_MIN_SIZE_WIDTH;
  filter->min_size_height = DEFAULT_MIN_SIZE_HEIGHT;

  gst_opencv_video_filter_set_in_place (GST_OPENCV_VIDEO_FILTER_CAST (filter),
      TRUE);
}

/* Property setter. Changing "profile" frees the old cascade and reloads
 * immediately; the warning latch is reset so a bad path is reported again. */
static void
gst_face_blur_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstFaceBlur *filter = GST_FACE_BLUR (object);

  switch (prop_id) {
    case PROP_PROFILE:
      g_free (filter->profile);
      if (filter->cvCascade)
        delete filter->cvCascade;
      filter->profile = g_value_dup_string (value);
      filter->cvCascade =
          gst_face_blur_load_profile (filter, filter->profile);
      filter->sent_profile_load_failed_msg = FALSE;
      break;
    case PROP_SCALE_FACTOR:
      filter->scale_factor = g_value_get_double (value);
      break;
    case PROP_MIN_NEIGHBORS:
      filter->min_neighbors = g_value_get_int (value);
      break;
    case PROP_MIN_SIZE_WIDTH:
      filter->min_size_width = g_value_get_int (value);
      break;
    case PROP_MIN_SIZE_HEIGHT:
      filter->min_size_height = g_value_get_int (value);
      break;
    case PROP_FLAGS:
      filter->flags = g_value_get_flags (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}

/* Property getter: straight read-back of the instance fields. */
static void
gst_face_blur_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec)
{
  GstFaceBlur *filter = GST_FACE_BLUR (object);

  switch (prop_id) {
    case PROP_PROFILE:
      g_value_set_string (value, filter->profile);
      break;
    case PROP_SCALE_FACTOR:
      g_value_set_double (value, filter->scale_factor);
      break;
    case PROP_MIN_NEIGHBORS:
      g_value_set_int (value, filter->min_neighbors);
      break;
    case PROP_MIN_SIZE_WIDTH:
      g_value_set_int (value, filter->min_size_width);
      break;
    case PROP_MIN_SIZE_HEIGHT:
      g_value_set_int (value, filter->min_size_height);
      break;
    case PROP_FLAGS:
      g_value_set_flags (value, filter->flags);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}

/* Caps negotiated: (re)allocate the single-channel grayscale scratch
 * image matching the new input dimensions. */
static gboolean
gst_face_blur_set_caps (GstOpencvVideoFilter * transform,
    gint in_width, gint in_height, gint in_depth, gint in_channels,
    gint out_width, gint out_height, gint out_depth, gint out_channels)
{
  GstFaceBlur *filter = GST_FACE_BLUR (transform);

  if (filter->cvGray)
    cvReleaseImage (&filter->cvGray);

  filter->cvGray =
      cvCreateImage (cvSize (in_width, in_height), IPL_DEPTH_8U, 1);

  return TRUE;
}

/* In-place transform: detect faces on a grayscale copy, then blur each
 * detected rectangle directly in the RGB frame. Without a loaded cascade
 * the frame passes through untouched (warning emitted once). */
static GstFlowReturn
gst_face_blur_transform_ip (GstOpencvVideoFilter * transform,
    GstBuffer * buffer, IplImage * img)
{
  GstFaceBlur *filter = GST_FACE_BLUR (transform);
  vector < Rect > faces;
  unsigned int i;

  if (!filter->cvCascade) {
    if (filter->profile != NULL
        && filter->sent_profile_load_failed_msg == FALSE) {
      GST_ELEMENT_WARNING (filter, RESOURCE, NOT_FOUND,
          ("Profile %s is missing.", filter->profile),
          ("missing faceblur profile file %s", filter->profile));
      filter->sent_profile_load_failed_msg = TRUE;
    }
    return GST_FLOW_OK;
  }

  cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);

  /* NOTE(review): Rect x/y are taken from cvGray->origin (a 0/1 top/bottom
   * flag), presumably intended as a zero offset — confirm against upstream. */
  Mat image (filter->cvGray,
      Rect (filter->cvGray->origin, filter->cvGray->origin,
          filter->cvGray->width, filter->cvGray->height));
  filter->cvCascade->detectMultiScale (image, faces,
      filter->scale_factor, filter->min_neighbors, filter->flags,
      cvSize (filter->min_size_width, filter->min_size_height), cvSize (0,
          0));

  if (!faces.empty ()) {
    for (i = 0; i < faces.size (); ++i) {
      Rect *r = &faces[i];
      /* ROI is a view into img, so both blurs modify the frame in place. */
      Mat roi (img, Rect (r->x, r->y, r->width, r->height));
      blur (roi, roi, Size (11, 11));
      GaussianBlur (roi, roi, Size (11, 11), 0, 0);
    }
  }

  return GST_FLOW_OK;
}

/* Loads a Haar cascade from @profile.
 * Returns a heap-allocated CascadeClassifier (caller owns, delete when
 * done) or NULL if the file is missing/invalid. */
static CascadeClassifier *
gst_face_blur_load_profile (GstFaceBlur * filter, gchar * profile)
{
  CascadeClassifier *cascade;

  cascade = new CascadeClassifier (profile);
  if (cascade->empty ()) {
    GST_ERROR_OBJECT (filter, "Invalid profile file: %s", profile);
    delete cascade;
    return NULL;
  }
  return cascade;
}

/* entry point to initialize the plug-in
 * initialize the plug-in itself
 * register the element factories and other features
 */
gboolean
gst_face_blur_plugin_init (GstPlugin * plugin)
{
  /* debug category for filtering log messages */
  GST_DEBUG_CATEGORY_INIT (gst_face_blur_debug, "faceblur",
      0, "Blurs faces in images and videos");

  return gst_element_register (plugin, "faceblur", GST_RANK_NONE,
      GST_TYPE_FACE_BLUR);
}
gpl-2.0
miiicmueller/TerraZoo
contiki-2.7/cpu/cc2430/dev/clock.c
5
5860
/*
 * Copyright (c) 2009, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This file is part of the Contiki operating system.
 */

/**
 * \file
 *  Implementation of the clock functions for the cc243x
 * \author
 *  Zach Shelby (zach@sensinode.com) - original
 *  George Oikonomou - <oikonomou@users.sourceforge.net>
 */
#include "sys/clock.h"
#include "sys/etimer.h"
#include "cc2430_sfr.h"
#include "sys/energest.h"

/* Sleep timer runs on the 32k RC osc. */
/* One clock tick is 7.8 ms */
#define TICK_VAL (32768/128)	/* 256 sleep-timer counts per system tick */
/*---------------------------------------------------------------------------*/
#if CLOCK_CONF_STACK_FRIENDLY
/* Set by the ISR; polled from the main loop instead of calling
 * etimer_request_poll() from interrupt context (saves ISR stack). */
volatile uint8_t sleep_flag;
#endif
/*---------------------------------------------------------------------------*/
/* Used in sleep timer interrupt for calculating the next interrupt time */
static unsigned long timer_value;

static volatile CC_AT_DATA clock_time_t count = 0; /* Uptime in ticks */
static volatile CC_AT_DATA clock_time_t seconds = 0; /* Uptime in secs */
/*---------------------------------------------------------------------------*/
/**
 * Each iteration is ~1.0xy usec, so this function delays for roughly len usec
 * Busy-wait with interrupts disabled; only suitable for short delays.
 */
void
clock_delay_usec(uint16_t len)
{
  DISABLE_INTERRUPTS();
  while(len--) {
    ASM(nop);
    ASM(nop);
    ASM(nop);
    ASM(nop);
  }
  ENABLE_INTERRUPTS();
}
/*---------------------------------------------------------------------------*/
/**
 * Wait for a multiple of ~8 ms (a tick)
 * Busy-waits on clock_time(); the subtraction is wrap-around safe for
 * unsigned clock_time_t.
 */
void
clock_wait(clock_time_t i)
{
  clock_time_t start;

  start = clock_time();
  while(clock_time() - start < (clock_time_t)i);
}
/*---------------------------------------------------------------------------*/
/* Current uptime in ticks (incremented by the sleep-timer ISR). */
CCIF clock_time_t
clock_time(void)
{
  return count;
}
/*---------------------------------------------------------------------------*/
/* Current uptime in whole seconds (derived from ticks in the ISR). */
CCIF unsigned long
clock_seconds(void)
{
  return seconds;
}
/*---------------------------------------------------------------------------*/
/*
 * Select the 32 kHz source for the sleep timer, program the first compare
 * value (now + TICK_VAL) into ST0:ST1:ST2 and enable the sleep timer
 * interrupt. Subsequent compare values are re-armed by clock_ISR.
 */
void
clock_init(void)
{
  CLKCON = OSC32K | TICKSPD2 | TICKSPD1;	/* tickspeed 500 kHz for timers[1-4] */

  /* Initialize tick value */
  timer_value = ST0;	/* ST low bits [7:0] */
  timer_value += ((unsigned long int)ST1) << 8;	/* middle bits [15:8] */
  timer_value += ((unsigned long int)ST2) << 16;	/* high bits [23:16] */
  timer_value += TICK_VAL;	/* Init value 256 */
  /* NOTE: writes latch on ST0, so ST2/ST1 are written first, ST0 last. */
  ST2 = (unsigned char)(timer_value >> 16);
  ST1 = (unsigned char)(timer_value >> 8);
  ST0 = (unsigned char)timer_value;

  IEN0_STIE = 1;	/* IEN0.STIE acknowledge Sleep Timer Interrupt */
}
/*---------------------------------------------------------------------------*/
#pragma save
#if CC_CONF_OPTIMIZE_STACK_SIZE
#pragma exclude bits
#endif
/*
 * Sleep Timer interrupt: re-arms the next compare (now + TICK_VAL),
 * advances the tick/second counters and notifies the etimer module
 * (directly or via sleep_flag, depending on CLOCK_CONF_STACK_FRIENDLY).
 */
void
clock_ISR(void) __interrupt(ST_VECTOR)
{
  DISABLE_INTERRUPTS();
  ENERGEST_ON(ENERGEST_TYPE_IRQ);

  /*
   * If the Sleep timer throws an interrupt while we are powering down to
   * PM1, we need to abort the power down. Clear SLEEP.MODE, this will signal
   * main() to abort the PM1 transition
   */
  SLEEP &= 0xFC;

  /*
   * Read value of the ST0:ST1:ST2, add TICK_VAL and write it back.
   * Next interrupt occurs after the current time + TICK_VAL
   */
  timer_value = ST0;
  timer_value += ((unsigned long int)ST1) << 8;
  timer_value += ((unsigned long int)ST2) << 16;
  timer_value += TICK_VAL;
  ST2 = (unsigned char)(timer_value >> 16);
  ST1 = (unsigned char)(timer_value >> 8);
  ST0 = (unsigned char)timer_value;

  ++count;

  /* Make sure the CLOCK_CONF_SECOND is a power of two, to ensure
     that the modulo operation below becomes a logical and and not
     an expensive divide. Algorithm from Wikipedia:
     http://en.wikipedia.org/wiki/Power_of_two */
#if (CLOCK_CONF_SECOND & (CLOCK_CONF_SECOND - 1)) != 0
#error CLOCK_CONF_SECOND must be a power of two (i.e., 1, 2, 4, 8, 16, 32, 64, ...).
#error Change CLOCK_CONF_SECOND in contiki-conf.h.
#endif
  if(count % CLOCK_CONF_SECOND == 0) {
    ++seconds;
  }

#if CLOCK_CONF_STACK_FRIENDLY
  sleep_flag = 1;
#else
  if(etimer_pending()
      && (etimer_next_expiration_time() - count - 1) > MAX_TICKS) {
    etimer_request_poll();
  }
#endif

  IRCON_STIF = 0;	/* clear the sleep-timer interrupt flag */
  ENERGEST_OFF(ENERGEST_TYPE_IRQ);
  ENABLE_INTERRUPTS();
}
#pragma restore
/*---------------------------------------------------------------------------*/
gpl-2.0
huahbo/src
user/fomels/Mzero.c
5
2197
/* Zero crossings with sub-pixel resolution. */
/*
  Copyright (C) 2011 University of Texas at Austin

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <rsf.h>

static sf_eno eno;  /* ENO interpolant of the current trace */
static int it;      /* index of the sample bracketing the current crossing */

static float func_eno(float t);

int main (int argc, char* argv[])
{
    bool down;
    int nt, i2, n2, *n0, nz, nw;
    float *trace, dt, t0, a, b, t;
    sf_file inp, nzero, out;

    sf_init (argc,argv);
    inp = sf_input("in");
    out = sf_output("out");
    nzero = sf_output("nzero");

    if (!sf_histint(inp,"n1",&nt)) sf_error("No n1= in input");
    if (!sf_histfloat(inp,"d1",&dt)) dt=1.;
    if (!sf_histfloat(inp,"o1",&t0)) t0=0.;
    n2 = sf_leftsize(inp,1);

    trace = sf_floatalloc(nt);
    n0 = sf_intalloc(n2);

    /* auxiliary output: one integer per trace = number of crossings found */
    sf_putint(nzero,"n1",1);
    sf_settype(nzero,SF_INT);

    if (!sf_getint ("nw",&nw)) nw=4;
    /* Interpolation accuracy */
    if (!sf_getbool("down",&down)) down=false;
    /* only zeros on the way down */

    eno = sf_eno_init (nw, nt);

    for (i2=0; i2 < n2; i2++) {
	sf_floatread(trace,nt,inp);

	/* BUG FIX: load the trace into the ENO interpolant before root
	   finding.  Without this call func_eno() (used by sf_zero below)
	   interpolates whatever data the interpolant held previously —
	   uninitialized memory on the first trace. */
	sf_eno_set (eno, trace);

	nz = 0;
	for (it = 0; it < nt-1; it++) {
	    a = trace[it];
	    b = trace[it+1];
	    /* sign change between samples it and it+1; upward crossings
	       are skipped when down=y */
	    if ((a <= 0. && b > 0. && !down) ||
		(a >= 0. && b < 0.)) {
		/* refine crossing location within [0,1) of a sample */
		t = sf_zero(func_eno,0.,1.,a,b,1.e-3,false);
		/* safe to overwrite in place: nz <= it always, and the
		   interpolant keeps its own copy of the data */
		trace[nz] = t0+(it+t)*dt;
		nz++;
	    }
	}
	/* first nz samples hold crossing times; the tail is unmodified input */
	sf_floatwrite(trace,nt,out);
	n0[i2] = nz;
    }
    sf_intwrite(n0,n2,nzero);

    exit(0);
}

static float func_eno(float t)
/* interpolation function: trace value at fractional offset t from sample it */
{
    float f, g;

    sf_eno_apply (eno,it,t,&f,&g,FUNC);
    return f;
}
gpl-2.0
archos-sa/archos-gpl-gen7-kernel
net/ipv4/netfilter/arp_tables.c
5
46287
/*
 * Packet matching code for ARP packets.
 *
 * Based heavily, if not almost entirely, upon ip_tables.c framework.
 *
 * Some ARP specific bits are:
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 *
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/capability.h>
#include <linux/if_arp.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <net/compat.h>
#include <net/sock.h>
#include <asm/uaccess.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("arptables core");

/*#define DEBUG_ARP_TABLES*/
/*#define DEBUG_ARP_TABLES_USER*/

#ifdef DEBUG_ARP_TABLES
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_ARP_TABLES_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define ARP_NF_ASSERT(x)					\
do {								\
	if (!(x))						\
		printk("ARP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define ARP_NF_ASSERT(x)
#endif

/*
 * Masked comparison of a hardware address from an ARP header against a
 * rule's address/mask pair.  len is clamped to ARPT_DEV_ADDR_LEN_MAX;
 * mismatching bits are OR-accumulated so the loop has no early exit.
 * Returns 1 if the addresses differ under the mask, 0 if they match.
 */
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
				      const char *hdr_addr, int len)
{
	int i, ret;

	if (len > ARPT_DEV_ADDR_LEN_MAX)
		len = ARPT_DEV_ADDR_LEN_MAX;

	ret = 0;
	for (i = 0; i < len; i++)
		ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];

	return (ret != 0);
}

/* Returns whether packet matches rule or not.
*/ static inline int arp_packet_match(const struct arphdr *arphdr, struct net_device *dev, const char *indev, const char *outdev, const struct arpt_arp *arpinfo) { const char *arpptr = (char *)(arphdr + 1); const char *src_devaddr, *tgt_devaddr; __be32 src_ipaddr, tgt_ipaddr; int i, ret; #define FWINV(bool, invflg) ((bool) ^ !!(arpinfo->invflags & (invflg))) if (FWINV((arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop, ARPT_INV_ARPOP)) { dprintf("ARP operation field mismatch.\n"); dprintf("ar_op: %04x info->arpop: %04x info->arpop_mask: %04x\n", arphdr->ar_op, arpinfo->arpop, arpinfo->arpop_mask); return 0; } if (FWINV((arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd, ARPT_INV_ARPHRD)) { dprintf("ARP hardware address format mismatch.\n"); dprintf("ar_hrd: %04x info->arhrd: %04x info->arhrd_mask: %04x\n", arphdr->ar_hrd, arpinfo->arhrd, arpinfo->arhrd_mask); return 0; } if (FWINV((arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro, ARPT_INV_ARPPRO)) { dprintf("ARP protocol address format mismatch.\n"); dprintf("ar_pro: %04x info->arpro: %04x info->arpro_mask: %04x\n", arphdr->ar_pro, arpinfo->arpro, arpinfo->arpro_mask); return 0; } if (FWINV((arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln, ARPT_INV_ARPHLN)) { dprintf("ARP hardware address length mismatch.\n"); dprintf("ar_hln: %02x info->arhln: %02x info->arhln_mask: %02x\n", arphdr->ar_hln, arpinfo->arhln, arpinfo->arhln_mask); return 0; } src_devaddr = arpptr; arpptr += dev->addr_len; memcpy(&src_ipaddr, arpptr, sizeof(u32)); arpptr += sizeof(u32); tgt_devaddr = arpptr; arpptr += dev->addr_len; memcpy(&tgt_ipaddr, arpptr, sizeof(u32)); if (FWINV(arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr, dev->addr_len), ARPT_INV_SRCDEVADDR) || FWINV(arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr, dev->addr_len), ARPT_INV_TGTDEVADDR)) { dprintf("Source or target device address mismatch.\n"); return 0; } if (FWINV((src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr, 
ARPT_INV_SRCIP) || FWINV(((tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr), ARPT_INV_TGTIP)) { dprintf("Source or target IP address mismatch.\n"); dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n", NIPQUAD(src_ipaddr), NIPQUAD(arpinfo->smsk.s_addr), NIPQUAD(arpinfo->src.s_addr), arpinfo->invflags & ARPT_INV_SRCIP ? " (INV)" : ""); dprintf("TGT: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n", NIPQUAD(tgt_ipaddr), NIPQUAD(arpinfo->tmsk.s_addr), NIPQUAD(arpinfo->tgt.s_addr), arpinfo->invflags & ARPT_INV_TGTIP ? " (INV)" : ""); return 0; } /* Look for ifname matches. */ for (i = 0, ret = 0; i < IFNAMSIZ; i++) { ret |= (indev[i] ^ arpinfo->iniface[i]) & arpinfo->iniface_mask[i]; } if (FWINV(ret != 0, ARPT_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", indev, arpinfo->iniface, arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":""); return 0; } for (i = 0, ret = 0; i < IFNAMSIZ; i++) { ret |= (outdev[i] ^ arpinfo->outiface[i]) & arpinfo->outiface_mask[i]; } if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", outdev, arpinfo->outiface, arpinfo->invflags&ARPT_INV_VIA_OUT ?" 
(INV)":"");
		return 0;
	}

	return 1;
#undef FWINV
}

/* Validate the flag/invflag bitmasks of a single rule's match part.
 * Returns 1 if only known bits are set, 0 otherwise. */
static inline int arp_checkentry(const struct arpt_arp *arp)
{
	if (arp->flags & ~ARPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 arp->flags & ~ARPT_F_MASK);
		return 0;
	}
	if (arp->invflags & ~ARPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 arp->invflags & ~ARPT_INV_MASK);
		return 0;
	}

	return 1;
}

/* Target of the built-in ERROR rule: log (ratelimited) and drop. */
static unsigned int arpt_error(struct sk_buff *skb,
			       const struct net_device *in,
			       const struct net_device *out,
			       unsigned int hooknum,
			       const struct xt_target *target,
			       const void *targinfo)
{
	if (net_ratelimit())
		printk("arp_tables: error: '%s'\n", (char *)targinfo);

	return NF_DROP;
}

/* Translate a byte offset inside the table blob into an entry pointer. */
static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}

/* Main packet-processing path: walk the per-CPU copy of the rule table
 * for the given hook and return the netfilter verdict.  Jumps use the
 * entry's comefrom field as a back-pointer stack (see the v < 0 branch). */
unsigned int arpt_do_table(struct sk_buff *skb,
			   unsigned int hook,
			   const struct net_device *in,
			   const struct net_device *out,
			   struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ];
	unsigned int verdict = NF_DROP;
	const struct arphdr *arp;
	bool hotdrop = false;
	struct arpt_entry *e, *back;
	const char *indev, *outdev;
	void *table_base;
	const struct xt_table_info *private;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return NF_DROP;

	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;

	/* Read lock protects the table against a concurrent replace;
	 * entries themselves are per-CPU. */
	read_lock_bh(&table->lock);
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);
	back = get_entry(table_base, private->underflow[hook]);

	arp = arp_hdr(skb);
	do {
		if (arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
			struct arpt_entry_target *t;
			int hdr_len;

			/* Account header bytes + both hardware addresses. */
			hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) +
				(2 * skb->dev->addr_len);
			ADD_COUNTER(e->counters, hdr_len, 1);

			t = arpt_get_target(e);

			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct arpt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != ARPT_RETURN) {
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v
				    != (void *)e + e->next_offset) {
					/* Save old back ptr in next entry */
					struct arpt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom =
						(void *)back - table_base;

					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				 * abs. verdicts
				 */
				verdict = t->u.kernel.target->target(skb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

				/* Target might have changed stuff. */
				arp = arp_hdr(skb);

				if (verdict == ARPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);
	read_unlock_bh(&table->lock);

	if (hotdrop)
		return NF_DROP;
	else
		return verdict;
}

/* All zeroes == unconditional rule. */
static inline int unconditional(const struct arpt_arp *arp)
{
	unsigned int i;

	for (i = 0; i < sizeof(*arp)/sizeof(__u32); i++)
		if (((__u32 *)arp)[i])
			return 0;

	return 1;
}

/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops.  Puts hook bitmask in comefrom.  Abuses
 * counters.pcnt as a temporary back-pointer while walking (reset to 0
 * on the way back out). */
static int mark_source_chains(struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
	 */
	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct arpt_entry *e
			= (struct arpt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct arpt_standard_target *t
				= (void *)arpt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
				printk("arptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct arpt_entry)
			    && (strcmp(t->target.u.user.name,
				       ARPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->arp)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    ARPT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						 "negative verdict (%i)\n",
						 t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				 * big jump.
				 */
				do {
					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct arpt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct arpt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   ARPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct arpt_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}

					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct arpt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}

/* Basic sanity checks on one entry: valid match bits and a target that
 * actually fits inside the entry's declared size. */
static inline int check_entry(struct arpt_entry *e, const char *name)
{
	const struct arpt_entry_target *t;

	if (!arp_checkentry(&e->arp)) {
		duprintf("arp_tables: arp check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct arpt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = arpt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

/* Run the xt core and per-target checkentry validation on a bound target. */
static inline int check_target(struct arpt_entry *e, const char *name)
{
	struct arpt_entry_target *t;
	struct xt_target *target;
	int ret;

	t = arpt_get_target(e);
	target = t->u.kernel.target;

	ret = xt_check_target(target, NF_ARP, t->u.target_size - sizeof(*t),
			      name, e->comefrom, 0, 0);
	if (!ret && t->u.kernel.target->checkentry &&
	    !t->u.kernel.target->checkentry(name, e, target, t->data,
					    e->comefrom)) {
		duprintf("arp_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}

/* Resolve the entry's target module (auto-loading "arpt_<name>" if
 * needed), bind it, and validate it; bumps *i on success. */
static inline int find_check_entry(struct arpt_entry *e, const char *name,
				   unsigned int size, unsigned int *i)
{
	struct arpt_entry_target *t;
	struct xt_target *target;
	int ret;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	t = arpt_get_target(e);
	target = try_then_request_module(xt_find_target(NF_ARP,
							t->u.user.name,
							t->u.user.revision),
					 "arpt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ?
PTR_ERR(target) : -ENOENT;
		goto out;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
err:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}

/* Validate alignment, bounds and minimum size of one entry, record any
 * hook entry/underflow offsets that land on it, and reset its runtime
 * fields (counters, comefrom). */
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     unsigned char *base,
					     unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct arpt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not ARPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}

/* Release one entry's target (destroy hook + module ref).  With a
 * non-NULL *i, stops after *i entries — used to unwind a partially
 * checked table. */
static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
{
	struct arpt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	t = arpt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}

/* Checks and translates the user-supplied table segment (held in
 * newinfo): size/offset validation, hook assignment, loop detection,
 * per-entry target checks, then replication to every other CPU's copy. */
static int translate_table(const char *name,
			   unsigned int valid_hooks,
			   struct xt_table_info *newinfo,
			   void *entry0,
			   unsigned int size,
			   unsigned int number,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;

	/* Walk through entries, checking offsets. */
	ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry_size_and_hooks,
				 newinfo,
				 entry0,
				 entry0 + size,
				 hook_entries, underflows, &i);
	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
		duprintf("Looping hook\n");
		return -ELOOP;
	}

	/* Finally, each sanity check must pass */
	i = 0;
	ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
				 find_check_entry, name, size, &i);

	if (ret != 0) {
		/* Unwind only the i entries that were fully checked. */
		ARPT_ENTRY_ITERATE(entry0, newinfo->size,
				   cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

/* Gets counters. */
/* Fold one entry's counters into total[*i] by addition. */
static inline int add_entry_to_counter(const struct arpt_entry *e,
				       struct xt_counters total[],
				       unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

/* Overwrite total[*i] with one entry's counters (first-CPU pass). */
static inline int set_entry_to_counter(const struct arpt_entry *e,
				       struct xt_counters total[],
				       unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}

/* Sum the per-CPU counter copies into the counters[] array: SET from
 * the current CPU's copy, then ADD every other CPU's. */
static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We dont care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	ARPT_ENTRY_ITERATE(t->entries[curcpu],
			   t->size,
			   set_entry_to_counter,
			   counters,
			   &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		ARPT_ENTRY_ITERATE(t->entries[cpu],
				   t->size,
				   add_entry_to_counter,
				   counters,
				   &i);
	}
}

/* vmalloc a counter array and fill it with an atomic snapshot (taken
 * under the table write lock).  Caller vfree()s; errors via ERR_PTR. */
static inline struct xt_counters *alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care
	 * about).
	 */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}

/* Copy the whole table blob to userspace, then patch in the summed
 * counters and the kernel-side target names entry by entry. */
static int copy_entries_to_user(unsigned int total_size,
				struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		struct arpt_entry_target *t;

		e = (struct arpt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct arpt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
/* Adjust a compat standard verdict: positive values are jump offsets
 * and must be translated to native offsets. */
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(NF_ARP, v);
	memcpy(dst, &v, sizeof(v));
}

/* Inverse of compat_standard_from_user: native offset -> compat offset. */
static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(NF_ARP, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

/* Account the size delta of one native entry vs its compat layout and
 * shift hook/underflow offsets of entries that follow it. */
static int compat_calc_entry(struct arpt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct arpt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;

	t = arpt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(NF_ARP, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

/* Build a compat-sized xt_table_info describing the native table. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return ARPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				  compat_calc_entry, info, loc_cpu_entry,
				  newinfo);
}
#endif

/* ARPT_SO_GET_INFO handler (native and compat): copy table metadata
 * (hook entries, underflows, counts, size) to userspace. */
static int get_info(struct net *net, void __user *user, int *len, int compat)
{
	char name[ARPT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct arpt_getinfo)) {
		duprintf("length %u != %Zu\n", *len,
			 sizeof(struct arpt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Force NUL termination of the userspace-supplied name. */
	name[ARPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(NF_ARP);
#endif
	t = try_then_request_module(xt_find_table_lock(net, NF_ARP, name),
				    "arptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(NF_ARP);
private = &tmp; } #endif info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(NF_ARP); #endif return ret; } static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, int *len) { int ret; struct arpt_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %Zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct arpt_get_entries) + get.size) { duprintf("get_entries: %u != %Zu\n", *len, sizeof(struct arpt_get_entries) + get.size); return -EINVAL; } t = xt_find_table_lock(net, NF_ARP, get.name); if (t && !IS_ERR(t)) { const struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; void *loc_cpu_old_entry; ret = 0; counters = vmalloc_node(num_counters * sizeof(struct xt_counters), numa_node_id()); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, NF_ARP, name), "arptable_%s", name); if (!t || IS_ERR(t)) { ret = t ? 
PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! */ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters. */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()]; ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) ret = -EFAULT; vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct arpt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; /* choose the copy that is on our node/cpu */ loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(tmp.name, tmp.valid_hooks, newinfo, loc_cpu_entry, tmp.size, tmp.num_entries, 
tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("arp_tables: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); free_newinfo: xt_free_table_info(newinfo); return ret; } /* We're lazy, and add to the first CPU; overflow works its fey magic * and everything is OK. */ static inline int add_counter_to_entry(struct arpt_entry *e, const struct xt_counters addme[], unsigned int *i) { ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); (*i)++; return 0; } static int do_add_counters(struct net *net, void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; const char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; void *loc_cpu_entry; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc_node(len - size, numa_node_id()); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user + size, len - size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, NF_ARP, name); if (!t || IS_ERR(t)) { ret = t ? 
PTR_ERR(t) : -ENOENT;
		goto free;
	}

	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[smp_processor_id()];
	ARPT_ENTRY_ITERATE(loc_cpu_entry,
			   private->size,
			   add_counter_to_entry,
			   paddc,
			   &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
/* Drop the target module reference of one compat entry.  With a
 * non-NULL *i, stops after *i entries (partial-unwind helper). */
static inline int
compat_release_entry(struct compat_arpt_entry *e, unsigned int *i)
{
	struct arpt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	t = compat_arpt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}

/* Compat counterpart of check_entry_size_and_hooks: validate one
 * compat-layout entry, bind its target module, and register the
 * native-vs-compat size delta with the xt compat offset table. */
static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct arpt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_arpt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct arpt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - (void *)base;

	t = compat_arpt_get_target(e);
	target = try_then_request_module(xt_find_target(NF_ARP,
							t->u.user.name,
							t->u.user.revision),
					 "arpt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto out;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(NF_ARP, entry_offset, off);
	if (ret)
		goto release_target;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

release_target:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}

/* Expand one compat entry into native layout at *dstptr, translating
 * target data and shifting hook/underflow offsets by the size delta. */
static int compat_copy_entry_from_user(struct compat_arpt_entry *e,
				       void **dstptr,
				       unsigned int *size,
				       const char *name,
				       struct xt_table_info *newinfo,
				       unsigned char *base)
{
	struct arpt_entry_target *t;
	struct xt_target *target;
	struct arpt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct arpt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct arpt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct arpt_entry);
	*size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_arpt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

/* Final per-entry validation after compat translation (target check). */
static inline int compat_check_entry(struct arpt_entry *e, const char *name,
				     unsigned int *i)
{
	int ret;

	ret = check_target(e, name);
	if (ret)
		return ret;

	(*i)++;
	return 0;
}

/* Translate a compat-layout table blob into a freshly allocated native
 * table: validate compat entries, expand to native layout, run loop
 * detection and target checks, then hand back the new table via
 * *pinfo/*pentry0 (freeing the old ones on success). */
static int translate_compat_table(const char *name,
				  unsigned int valid_hooks,
				  struct xt_table_info **pinfo,
				  void **pentry0,
				  unsigned int total_size,
				  unsigned int number,
				  unsigned int *hook_entries,
				  unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(NF_ARP);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
					check_compat_entry_size_and_hooks,
					info, &size, entry0,
					entry0 + total_size,
					hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
					compat_copy_entry_from_user,
					&pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(NF_ARP);
	xt_compat_unlock(NF_ARP);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = ARPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
name, &i); if (ret) { j -= i; COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i, compat_release_entry, &j); ARPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i); xt_free_table_info(newinfo); return ret; } /* And one copy for every other CPU */ for_each_possible_cpu(i) if (newinfo->entries[i] && newinfo->entries[i] != entry1) memcpy(newinfo->entries[i], entry1, newinfo->size); *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j); return ret; out_unlock: xt_compat_flush_offsets(NF_ARP); xt_compat_unlock(NF_ARP); goto out; } struct compat_arpt_replace { char name[ARPT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_ARP_NUMHOOKS]; u32 underflow[NF_ARP_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; struct compat_arpt_entry entries[0]; }; static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_arpt_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; /* choose the copy that is on our node/cpu */ loc_cpu_entry = newinfo->entries[raw_smp_processor_id()]; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; 
return 0; free_newinfo_untrans: ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!capable(CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case ARPT_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case ARPT_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_arpt_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, compat_uint_t *size, struct xt_counters *counters, unsigned int *i) { struct arpt_entry_target *t; struct compat_arpt_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; int ret; ret = -EFAULT; origsize = *size; ce = (struct compat_arpt_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct arpt_entry))) goto out; if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i]))) goto out; *dstptr += sizeof(struct compat_arpt_entry); *size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry); target_offset = e->target_offset - (origsize - *size); t = arpt_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) goto out; ret = -EFAULT; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset)) goto out; if (put_user(next_offset, &ce->next_offset)) goto out; (*i)++; return 0; out: return ret; } static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; void *loc_cpu_entry; unsigned int i = 0; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); /* choose the copy on our node/cpu */ 
loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = ARPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				 compat_copy_entry_to_user,
				 &pos, &size, counters, &i);
	vfree(counters);
	return ret;
}

/* 32-bit userspace layout of struct arpt_get_entries. */
struct compat_arpt_get_entries {
	char name[ARPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_arpt_entry entrytable[0];
};

/* Compat ARPT_SO_GET_ENTRIES handler.
 * NOTE(review): get.name is passed to xt_find_table_lock() without
 * forced NUL termination here — verify against the CVE-2011-1170
 * follow-up fixes whether this path needs the same treatment. */
static int compat_get_entries(struct net *net,
			      struct compat_arpt_get_entries __user *uptr,
			      int *len)
{
	int ret;
	struct compat_arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct compat_arpt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(NF_ARP);
	t = xt_find_table_lock(net, NF_ARP, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;

		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(NF_ARP);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(NF_ARP);
	return ret;
}

static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);

/* Compat getsockopt dispatcher; falls through to the native handler
 * for requests with identical layouts. */
static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
				  int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case ARPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_arpt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

/* Native setsockopt dispatcher. */
static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
			   unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_arpt_set_ctl:  unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

/* Native getsockopt dispatcher. */
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
			   int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case ARPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case ARPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		try_then_request_module(xt_find_revision(NF_ARP, rev.name,
							 rev.revision, 1,
							 &ret),
					"arpt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

/* Register a table for this netns from a built-in replace blob. */
struct xt_table *arpt_register_table(struct net *net, struct xt_table *table,
				     const struct arpt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);

	duprintf("arpt_register_table: translate table gives %d\n", ret);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

/* Tear down a registered table: release every entry's target and the
 * table memory; drop the owner module ref if user rules were loaded. */
void arpt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size,
			   cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target arpt_standard_target __read_mostly = {
	.name		= ARPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NF_ARP,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target arpt_error_target __read_mostly = {
	.name		= ARPT_ERROR_TARGET,
	.target		= arpt_error,
	.targetsize	= ARPT_FUNCTION_MAXNAMELEN,
	.family		= NF_ARP,
};

static struct nf_sockopt_ops arpt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= ARPT_BASE_CTL,
	.set_optmax	= ARPT_SO_SET_MAX+1,
	.set		= do_arpt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_arpt_set_ctl,
#endif
	.get_optmin	= ARPT_BASE_CTL,
	.get_optmax	= ARPT_SO_GET_MAX+1,
	.get		= do_arpt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_arpt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

/* Per-netns init/exit: set up the NF_ARP xt protocol state. */
static int __net_init arp_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NF_ARP);
}

static void __net_exit arp_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NF_ARP);
}

static struct pernet_operations arp_tables_net_ops = {
	.init = arp_tables_net_init,
	.exit = arp_tables_net_exit,
};

/* Module init: pernet state, built-in targets, then the sockopt hooks. */
static int __init arp_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&arp_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&arpt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&arpt_error_target);
	if (ret < 0)
		goto err3;

	/* Register setsockopt */
	ret = nf_register_sockopt(&arpt_sockopts);
	if (ret < 0)
		goto err4;

	printk(KERN_INFO "arp_tables: (C) 2002 David S. 
Miller\n"); return 0; err4: xt_unregister_target(&arpt_error_target); err3: xt_unregister_target(&arpt_standard_target); err2: unregister_pernet_subsys(&arp_tables_net_ops); err1: return ret; } static void __exit arp_tables_fini(void) { nf_unregister_sockopt(&arpt_sockopts); xt_unregister_target(&arpt_error_target); xt_unregister_target(&arpt_standard_target); unregister_pernet_subsys(&arp_tables_net_ops); } EXPORT_SYMBOL(arpt_register_table); EXPORT_SYMBOL(arpt_unregister_table); EXPORT_SYMBOL(arpt_do_table); module_init(arp_tables_init); module_exit(arp_tables_fini);
gpl-2.0
froggy666uk/froggy_grouper_kernel
drivers/dma/txx9dmac.c
261
35485
/* * Driver for the TXx9 SoC DMA Controller * * Copyright (C) 2009 Atsushi Nemoto * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/scatterlist.h> #include "dmaengine.h" #include "txx9dmac.h" static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) { return container_of(chan, struct txx9dmac_chan, chan); } static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc) { return dc->ch_regs; } static struct txx9dmac_cregs32 __iomem *__dma_regs32( const struct txx9dmac_chan *dc) { return dc->ch_regs; } #define channel64_readq(dc, name) \ __raw_readq(&(__dma_regs(dc)->name)) #define channel64_writeq(dc, name, val) \ __raw_writeq((val), &(__dma_regs(dc)->name)) #define channel64_readl(dc, name) \ __raw_readl(&(__dma_regs(dc)->name)) #define channel64_writel(dc, name, val) \ __raw_writel((val), &(__dma_regs(dc)->name)) #define channel32_readl(dc, name) \ __raw_readl(&(__dma_regs32(dc)->name)) #define channel32_writel(dc, name, val) \ __raw_writel((val), &(__dma_regs32(dc)->name)) #define channel_readq(dc, name) channel64_readq(dc, name) #define channel_writeq(dc, name, val) channel64_writeq(dc, name, val) #define channel_readl(dc, name) \ (is_dmac64(dc) ? \ channel64_readl(dc, name) : channel32_readl(dc, name)) #define channel_writel(dc, name, val) \ (is_dmac64(dc) ? 
\ channel64_writel(dc, name, val) : channel32_writel(dc, name, val)) static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc) { if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) return channel64_readq(dc, CHAR); else return channel64_readl(dc, CHAR); } static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) { if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) channel64_writeq(dc, CHAR, val); else channel64_writel(dc, CHAR, val); } static void channel64_clear_CHAR(const struct txx9dmac_chan *dc) { #if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) channel64_writel(dc, CHAR, 0); channel64_writel(dc, __pad_CHAR, 0); #else channel64_writeq(dc, CHAR, 0); #endif } static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc) { if (is_dmac64(dc)) return channel64_read_CHAR(dc); else return channel32_readl(dc, CHAR); } static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) { if (is_dmac64(dc)) channel64_write_CHAR(dc, val); else channel32_writel(dc, CHAR, val); } static struct txx9dmac_regs __iomem *__txx9dmac_regs( const struct txx9dmac_dev *ddev) { return ddev->regs; } static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32( const struct txx9dmac_dev *ddev) { return ddev->regs; } #define dma64_readl(ddev, name) \ __raw_readl(&(__txx9dmac_regs(ddev)->name)) #define dma64_writel(ddev, name, val) \ __raw_writel((val), &(__txx9dmac_regs(ddev)->name)) #define dma32_readl(ddev, name) \ __raw_readl(&(__txx9dmac_regs32(ddev)->name)) #define dma32_writel(ddev, name, val) \ __raw_writel((val), &(__txx9dmac_regs32(ddev)->name)) #define dma_readl(ddev, name) \ (__is_dmac64(ddev) ? \ dma64_readl(ddev, name) : dma32_readl(ddev, name)) #define dma_writel(ddev, name, val) \ (__is_dmac64(ddev) ? 
\ dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val)) static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } static struct device *chan2parent(struct dma_chan *chan) { return chan->dev->device.parent; } static struct txx9dmac_desc * txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd) { return container_of(txd, struct txx9dmac_desc, txd); } static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc, const struct txx9dmac_desc *desc) { return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR; } static void desc_write_CHAR(const struct txx9dmac_chan *dc, struct txx9dmac_desc *desc, dma_addr_t val) { if (is_dmac64(dc)) desc->hwdesc.CHAR = val; else desc->hwdesc32.CHAR = val; } #define TXX9_DMA_MAX_COUNT 0x04000000 #define TXX9_DMA_INITIAL_DESC_COUNT 64 static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc) { return list_entry(dc->active_list.next, struct txx9dmac_desc, desc_node); } static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc) { return list_entry(dc->active_list.prev, struct txx9dmac_desc, desc_node); } static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc) { return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node); } static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) { if (!list_empty(&desc->tx_list)) desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node); return desc; } static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx); static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc, gfp_t flags) { struct txx9dmac_dev *ddev = dc->ddev; struct txx9dmac_desc *desc; desc = kzalloc(sizeof(*desc), flags); if (!desc) return NULL; INIT_LIST_HEAD(&desc->tx_list); dma_async_tx_descriptor_init(&desc->txd, &dc->chan); desc->txd.tx_submit = txx9dmac_tx_submit; /* txd.flags will be overwritten in prep funcs */ desc->txd.flags = DMA_CTRL_ACK; desc->txd.phys = 
dma_map_single(chan2parent(&dc->chan), &desc->hwdesc, ddev->descsize, DMA_TO_DEVICE); return desc; } static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc) { struct txx9dmac_desc *desc, *_desc; struct txx9dmac_desc *ret = NULL; unsigned int i = 0; spin_lock_bh(&dc->lock); list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) { if (async_tx_test_ack(&desc->txd)) { list_del(&desc->desc_node); ret = desc; break; } dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc); i++; } spin_unlock_bh(&dc->lock); dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n", i); if (!ret) { ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC); if (ret) { spin_lock_bh(&dc->lock); dc->descs_allocated++; spin_unlock_bh(&dc->lock); } else dev_err(chan2dev(&dc->chan), "not enough descriptors available\n"); } return ret; } static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) { struct txx9dmac_dev *ddev = dc->ddev; struct txx9dmac_desc *child; list_for_each_entry(child, &desc->tx_list, desc_node) dma_sync_single_for_cpu(chan2parent(&dc->chan), child->txd.phys, ddev->descsize, DMA_TO_DEVICE); dma_sync_single_for_cpu(chan2parent(&dc->chan), desc->txd.phys, ddev->descsize, DMA_TO_DEVICE); } /* * Move a descriptor, including any children, to the free list. * `desc' must not be on any lists. 
*/ static void txx9dmac_desc_put(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) { if (desc) { struct txx9dmac_desc *child; txx9dmac_sync_desc_for_cpu(dc, desc); spin_lock_bh(&dc->lock); list_for_each_entry(child, &desc->tx_list, desc_node) dev_vdbg(chan2dev(&dc->chan), "moving child desc %p to freelist\n", child); list_splice_init(&desc->tx_list, &dc->free_list); dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", desc); list_add(&desc->desc_node, &dc->free_list); spin_unlock_bh(&dc->lock); } } /*----------------------------------------------------------------------*/ static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) { if (is_dmac64(dc)) dev_err(chan2dev(&dc->chan), " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x" " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", (u64)channel64_read_CHAR(dc), channel64_readq(dc, SAR), channel64_readq(dc, DAR), channel64_readl(dc, CNTR), channel64_readl(dc, SAIR), channel64_readl(dc, DAIR), channel64_readl(dc, CCR), channel64_readl(dc, CSR)); else dev_err(chan2dev(&dc->chan), " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x" " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", channel32_readl(dc, CHAR), channel32_readl(dc, SAR), channel32_readl(dc, DAR), channel32_readl(dc, CNTR), channel32_readl(dc, SAIR), channel32_readl(dc, DAIR), channel32_readl(dc, CCR), channel32_readl(dc, CSR)); } static void txx9dmac_reset_chan(struct txx9dmac_chan *dc) { channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST); if (is_dmac64(dc)) { channel64_clear_CHAR(dc); channel_writeq(dc, SAR, 0); channel_writeq(dc, DAR, 0); } else { channel_writel(dc, CHAR, 0); channel_writel(dc, SAR, 0); channel_writel(dc, DAR, 0); } channel_writel(dc, CNTR, 0); channel_writel(dc, SAIR, 0); channel_writel(dc, DAIR, 0); channel_writel(dc, CCR, 0); mmiowb(); } /* Called with dc->lock held and bh disabled */ static void txx9dmac_dostart(struct txx9dmac_chan *dc, struct txx9dmac_desc *first) { struct txx9dmac_slave *ds = dc->chan.private; u32 sai, dai; dev_vdbg(chan2dev(&dc->chan), 
"dostart %u %p\n", first->txd.cookie, first); /* ASSERT: channel is idle */ if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { dev_err(chan2dev(&dc->chan), "BUG: Attempted to start non-idle channel\n"); txx9dmac_dump_regs(dc); /* The tasklet will hopefully advance the queue... */ return; } if (is_dmac64(dc)) { channel64_writel(dc, CNTR, 0); channel64_writel(dc, CSR, 0xffffffff); if (ds) { if (ds->tx_reg) { sai = ds->reg_width; dai = 0; } else { sai = 0; dai = ds->reg_width; } } else { sai = 8; dai = 8; } channel64_writel(dc, SAIR, sai); channel64_writel(dc, DAIR, dai); /* All 64-bit DMAC supports SMPCHN */ channel64_writel(dc, CCR, dc->ccr); /* Writing a non zero value to CHAR will assert XFACT */ channel64_write_CHAR(dc, first->txd.phys); } else { channel32_writel(dc, CNTR, 0); channel32_writel(dc, CSR, 0xffffffff); if (ds) { if (ds->tx_reg) { sai = ds->reg_width; dai = 0; } else { sai = 0; dai = ds->reg_width; } } else { sai = 4; dai = 4; } channel32_writel(dc, SAIR, sai); channel32_writel(dc, DAIR, dai); if (txx9_dma_have_SMPCHN()) { channel32_writel(dc, CCR, dc->ccr); /* Writing a non zero value to CHAR will assert XFACT */ channel32_writel(dc, CHAR, first->txd.phys); } else { channel32_writel(dc, CHAR, first->txd.phys); channel32_writel(dc, CCR, dc->ccr); } } } /*----------------------------------------------------------------------*/ static void txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) { dma_async_tx_callback callback; void *param; struct dma_async_tx_descriptor *txd = &desc->txd; struct txx9dmac_slave *ds = dc->chan.private; dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", txd->cookie, desc); dma_cookie_complete(txd); callback = txd->callback; param = txd->callback_param; txx9dmac_sync_desc_for_cpu(dc, desc); list_splice_init(&desc->tx_list, &dc->free_list); list_move(&desc->desc_node, &dc->free_list); if (!ds) { dma_addr_t dmaaddr; if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { dmaaddr = is_dmac64(dc) 
? desc->hwdesc.DAR : desc->hwdesc32.DAR; if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) dma_unmap_single(chan2parent(&dc->chan), dmaaddr, desc->len, DMA_FROM_DEVICE); else dma_unmap_page(chan2parent(&dc->chan), dmaaddr, desc->len, DMA_FROM_DEVICE); } if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { dmaaddr = is_dmac64(dc) ? desc->hwdesc.SAR : desc->hwdesc32.SAR; if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) dma_unmap_single(chan2parent(&dc->chan), dmaaddr, desc->len, DMA_TO_DEVICE); else dma_unmap_page(chan2parent(&dc->chan), dmaaddr, desc->len, DMA_TO_DEVICE); } } /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here */ if (callback) callback(param); dma_run_dependencies(txd); } static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list) { struct txx9dmac_dev *ddev = dc->ddev; struct txx9dmac_desc *desc; struct txx9dmac_desc *prev = NULL; BUG_ON(!list_empty(list)); do { desc = txx9dmac_first_queued(dc); if (prev) { desc_write_CHAR(dc, prev, desc->txd.phys); dma_sync_single_for_device(chan2parent(&dc->chan), prev->txd.phys, ddev->descsize, DMA_TO_DEVICE); } prev = txx9dmac_last_child(desc); list_move_tail(&desc->desc_node, list); /* Make chain-completion interrupt happen */ if ((desc->txd.flags & DMA_PREP_INTERRUPT) && !txx9dmac_chan_INTENT(dc)) break; } while (!list_empty(&dc->queue)); } static void txx9dmac_complete_all(struct txx9dmac_chan *dc) { struct txx9dmac_desc *desc, *_desc; LIST_HEAD(list); /* * Submit queued descriptors ASAP, i.e. before we go through * the completed ones. 
*/ list_splice_init(&dc->active_list, &list); if (!list_empty(&dc->queue)) { txx9dmac_dequeue(dc, &dc->active_list); txx9dmac_dostart(dc, txx9dmac_first_active(dc)); } list_for_each_entry_safe(desc, _desc, &list, desc_node) txx9dmac_descriptor_complete(dc, desc); } static void txx9dmac_dump_desc(struct txx9dmac_chan *dc, struct txx9dmac_hwdesc *desc) { if (is_dmac64(dc)) { #ifdef TXX9_DMA_USE_SIMPLE_CHAIN dev_crit(chan2dev(&dc->chan), " desc: ch%#llx s%#llx d%#llx c%#x\n", (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR); #else dev_crit(chan2dev(&dc->chan), " desc: ch%#llx s%#llx d%#llx c%#x" " si%#x di%#x cc%#x cs%#x\n", (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR, desc->SAIR, desc->DAIR, desc->CCR, desc->CSR); #endif } else { struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc; #ifdef TXX9_DMA_USE_SIMPLE_CHAIN dev_crit(chan2dev(&dc->chan), " desc: ch%#x s%#x d%#x c%#x\n", d->CHAR, d->SAR, d->DAR, d->CNTR); #else dev_crit(chan2dev(&dc->chan), " desc: ch%#x s%#x d%#x c%#x" " si%#x di%#x cc%#x cs%#x\n", d->CHAR, d->SAR, d->DAR, d->CNTR, d->SAIR, d->DAIR, d->CCR, d->CSR); #endif } } static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr) { struct txx9dmac_desc *bad_desc; struct txx9dmac_desc *child; u32 errors; /* * The descriptor currently at the head of the active list is * borked. Since we don't have any way to report errors, we'll * just have to scream loudly and try to carry on. 
*/ dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n"); txx9dmac_dump_regs(dc); bad_desc = txx9dmac_first_active(dc); list_del_init(&bad_desc->desc_node); /* Clear all error flags and try to restart the controller */ errors = csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR | TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR); channel_writel(dc, CSR, errors); if (list_empty(&dc->active_list) && !list_empty(&dc->queue)) txx9dmac_dequeue(dc, &dc->active_list); if (!list_empty(&dc->active_list)) txx9dmac_dostart(dc, txx9dmac_first_active(dc)); dev_crit(chan2dev(&dc->chan), "Bad descriptor submitted for DMA! (cookie: %d)\n", bad_desc->txd.cookie); txx9dmac_dump_desc(dc, &bad_desc->hwdesc); list_for_each_entry(child, &bad_desc->tx_list, desc_node) txx9dmac_dump_desc(dc, &child->hwdesc); /* Pretend the descriptor completed successfully */ txx9dmac_descriptor_complete(dc, bad_desc); } static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc) { dma_addr_t chain; struct txx9dmac_desc *desc, *_desc; struct txx9dmac_desc *child; u32 csr; if (is_dmac64(dc)) { chain = channel64_read_CHAR(dc); csr = channel64_readl(dc, CSR); channel64_writel(dc, CSR, csr); } else { chain = channel32_readl(dc, CHAR); csr = channel32_readl(dc, CSR); channel32_writel(dc, CSR, csr); } /* For dynamic chain, we should look at XFACT instead of NCHNC */ if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) { /* Everything we've submitted is done */ txx9dmac_complete_all(dc); return; } if (!(csr & TXX9_DMA_CSR_CHNEN)) chain = 0; /* last descriptor of this chain */ dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n", (u64)chain); list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) { if (desc_read_CHAR(dc, desc) == chain) { /* This one is currently in progress */ if (csr & TXX9_DMA_CSR_ABCHC) goto scan_done; return; } list_for_each_entry(child, &desc->tx_list, desc_node) if (desc_read_CHAR(dc, child) == chain) { /* Currently in progress */ if (csr & 
TXX9_DMA_CSR_ABCHC) goto scan_done; return; } /* * No descriptors so far seem to be in progress, i.e. * this one must be done. */ txx9dmac_descriptor_complete(dc, desc); } scan_done: if (csr & TXX9_DMA_CSR_ABCHC) { txx9dmac_handle_error(dc, csr); return; } dev_err(chan2dev(&dc->chan), "BUG: All descriptors done, but channel not idle!\n"); /* Try to continue after resetting the channel... */ txx9dmac_reset_chan(dc); if (!list_empty(&dc->queue)) { txx9dmac_dequeue(dc, &dc->active_list); txx9dmac_dostart(dc, txx9dmac_first_active(dc)); } } static void txx9dmac_chan_tasklet(unsigned long data) { int irq; u32 csr; struct txx9dmac_chan *dc; dc = (struct txx9dmac_chan *)data; csr = channel_readl(dc, CSR); dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr); spin_lock(&dc->lock); if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | TXX9_DMA_CSR_NTRNFC)) txx9dmac_scan_descriptors(dc); spin_unlock(&dc->lock); irq = dc->irq; enable_irq(irq); } static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id) { struct txx9dmac_chan *dc = dev_id; dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n", channel_readl(dc, CSR)); tasklet_schedule(&dc->tasklet); /* * Just disable the interrupts. We'll turn them back on in the * softirq handler. 
*/ disable_irq_nosync(irq); return IRQ_HANDLED; } static void txx9dmac_tasklet(unsigned long data) { int irq; u32 csr; struct txx9dmac_chan *dc; struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data; u32 mcr; int i; mcr = dma_readl(ddev, MCR); dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr); for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) { if ((mcr >> (24 + i)) & 0x11) { dc = ddev->chan[i]; csr = channel_readl(dc, CSR); dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr); spin_lock(&dc->lock); if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | TXX9_DMA_CSR_NTRNFC)) txx9dmac_scan_descriptors(dc); spin_unlock(&dc->lock); } } irq = ddev->irq; enable_irq(irq); } static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id) { struct txx9dmac_dev *ddev = dev_id; dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n", dma_readl(ddev, MCR)); tasklet_schedule(&ddev->tasklet); /* * Just disable the interrupts. We'll turn them back on in the * softirq handler. */ disable_irq_nosync(irq); return IRQ_HANDLED; } /*----------------------------------------------------------------------*/ static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx) { struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx); struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan); dma_cookie_t cookie; spin_lock_bh(&dc->lock); cookie = dma_cookie_assign(tx); dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", desc->txd.cookie, desc); list_add_tail(&desc->desc_node, &dc->queue); spin_unlock_bh(&dc->lock); return cookie; } static struct dma_async_tx_descriptor * txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); struct txx9dmac_dev *ddev = dc->ddev; struct txx9dmac_desc *desc; struct txx9dmac_desc *first; struct txx9dmac_desc *prev; size_t xfer_count; size_t offset; dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n", (u64)dest, 
(u64)src, len, flags); if (unlikely(!len)) { dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); return NULL; } prev = first = NULL; for (offset = 0; offset < len; offset += xfer_count) { xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT); /* * Workaround for ERT-TX49H2-033, ERT-TX49H3-020, * ERT-TX49H4-016 (slightly conservative) */ if (__is_dmac64(ddev)) { if (xfer_count > 0x100 && (xfer_count & 0xff) >= 0xfa && (xfer_count & 0xff) <= 0xff) xfer_count -= 0x20; } else { if (xfer_count > 0x80 && (xfer_count & 0x7f) >= 0x7e && (xfer_count & 0x7f) <= 0x7f) xfer_count -= 0x20; } desc = txx9dmac_desc_get(dc); if (!desc) { txx9dmac_desc_put(dc, first); return NULL; } if (__is_dmac64(ddev)) { desc->hwdesc.SAR = src + offset; desc->hwdesc.DAR = dest + offset; desc->hwdesc.CNTR = xfer_count; txx9dmac_desc_set_nosimple(ddev, desc, 8, 8, dc->ccr | TXX9_DMA_CCR_XFACT); } else { desc->hwdesc32.SAR = src + offset; desc->hwdesc32.DAR = dest + offset; desc->hwdesc32.CNTR = xfer_count; txx9dmac_desc_set_nosimple(ddev, desc, 4, 4, dc->ccr | TXX9_DMA_CCR_XFACT); } /* * The descriptors on tx_list are not reachable from * the dc->queue list or dc->active_list after a * submit. If we put all descriptors on active_list, * calling of callback on the completion will be more * complex. 
*/ if (!first) { first = desc; } else { desc_write_CHAR(dc, prev, desc->txd.phys); dma_sync_single_for_device(chan2parent(&dc->chan), prev->txd.phys, ddev->descsize, DMA_TO_DEVICE); list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; } /* Trigger interrupt after last block */ if (flags & DMA_PREP_INTERRUPT) txx9dmac_desc_set_INTENT(ddev, prev); desc_write_CHAR(dc, prev, 0); dma_sync_single_for_device(chan2parent(&dc->chan), prev->txd.phys, ddev->descsize, DMA_TO_DEVICE); first->txd.flags = flags; first->len = len; return &first->txd; } static struct dma_async_tx_descriptor * txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); struct txx9dmac_dev *ddev = dc->ddev; struct txx9dmac_slave *ds = chan->private; struct txx9dmac_desc *prev; struct txx9dmac_desc *first; unsigned int i; struct scatterlist *sg; dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); BUG_ON(!ds || !ds->reg_width); if (ds->tx_reg) BUG_ON(direction != DMA_TO_DEVICE); else BUG_ON(direction != DMA_FROM_DEVICE); if (unlikely(!sg_len)) return NULL; prev = first = NULL; for_each_sg(sgl, sg, sg_len, i) { struct txx9dmac_desc *desc; dma_addr_t mem; u32 sai, dai; desc = txx9dmac_desc_get(dc); if (!desc) { txx9dmac_desc_put(dc, first); return NULL; } mem = sg_dma_address(sg); if (__is_dmac64(ddev)) { if (direction == DMA_TO_DEVICE) { desc->hwdesc.SAR = mem; desc->hwdesc.DAR = ds->tx_reg; } else { desc->hwdesc.SAR = ds->rx_reg; desc->hwdesc.DAR = mem; } desc->hwdesc.CNTR = sg_dma_len(sg); } else { if (direction == DMA_TO_DEVICE) { desc->hwdesc32.SAR = mem; desc->hwdesc32.DAR = ds->tx_reg; } else { desc->hwdesc32.SAR = ds->rx_reg; desc->hwdesc32.DAR = mem; } desc->hwdesc32.CNTR = sg_dma_len(sg); } if (direction == DMA_TO_DEVICE) { sai = ds->reg_width; dai = 0; } else { sai = 0; dai = ds->reg_width; } txx9dmac_desc_set_nosimple(ddev, desc, 
sai, dai, dc->ccr | TXX9_DMA_CCR_XFACT); if (!first) { first = desc; } else { desc_write_CHAR(dc, prev, desc->txd.phys); dma_sync_single_for_device(chan2parent(&dc->chan), prev->txd.phys, ddev->descsize, DMA_TO_DEVICE); list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; } /* Trigger interrupt after last block */ if (flags & DMA_PREP_INTERRUPT) txx9dmac_desc_set_INTENT(ddev, prev); desc_write_CHAR(dc, prev, 0); dma_sync_single_for_device(chan2parent(&dc->chan), prev->txd.phys, ddev->descsize, DMA_TO_DEVICE); first->txd.flags = flags; first->len = 0; return &first->txd; } static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); struct txx9dmac_desc *desc, *_desc; LIST_HEAD(list); /* Only supports DMA_TERMINATE_ALL */ if (cmd != DMA_TERMINATE_ALL) return -EINVAL; dev_vdbg(chan2dev(chan), "terminate_all\n"); spin_lock_bh(&dc->lock); txx9dmac_reset_chan(dc); /* active_list entries will end up before queued entries */ list_splice_init(&dc->queue, &list); list_splice_init(&dc->active_list, &list); spin_unlock_bh(&dc->lock); /* Flush all pending and queued descriptors */ list_for_each_entry_safe(desc, _desc, &list, desc_node) txx9dmac_descriptor_complete(dc, desc); return 0; } static enum dma_status txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); enum dma_status ret; ret = dma_cookie_status(chan, cookie, txstate); if (ret != DMA_SUCCESS) { spin_lock_bh(&dc->lock); txx9dmac_scan_descriptors(dc); spin_unlock_bh(&dc->lock); ret = dma_cookie_status(chan, cookie, txstate); } return ret; } static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc, struct txx9dmac_desc *prev) { struct txx9dmac_dev *ddev = dc->ddev; struct txx9dmac_desc *desc; LIST_HEAD(list); prev = txx9dmac_last_child(prev); txx9dmac_dequeue(dc, &list); desc = list_entry(list.next, struct txx9dmac_desc, 
desc_node); desc_write_CHAR(dc, prev, desc->txd.phys); dma_sync_single_for_device(chan2parent(&dc->chan), prev->txd.phys, ddev->descsize, DMA_TO_DEVICE); mmiowb(); if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) && channel_read_CHAR(dc) == prev->txd.phys) /* Restart chain DMA */ channel_write_CHAR(dc, desc->txd.phys); list_splice_tail(&list, &dc->active_list); } static void txx9dmac_issue_pending(struct dma_chan *chan) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); spin_lock_bh(&dc->lock); if (!list_empty(&dc->active_list)) txx9dmac_scan_descriptors(dc); if (!list_empty(&dc->queue)) { if (list_empty(&dc->active_list)) { txx9dmac_dequeue(dc, &dc->active_list); txx9dmac_dostart(dc, txx9dmac_first_active(dc)); } else if (txx9_dma_have_SMPCHN()) { struct txx9dmac_desc *prev = txx9dmac_last_active(dc); if (!(prev->txd.flags & DMA_PREP_INTERRUPT) || txx9dmac_chan_INTENT(dc)) txx9dmac_chain_dynamic(dc, prev); } } spin_unlock_bh(&dc->lock); } static int txx9dmac_alloc_chan_resources(struct dma_chan *chan) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); struct txx9dmac_slave *ds = chan->private; struct txx9dmac_desc *desc; int i; dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); /* ASSERT: channel is idle */ if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); return -EIO; } dma_cookie_init(chan); dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; txx9dmac_chan_set_SMPCHN(dc); if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN)) dc->ccr |= TXX9_DMA_CCR_INTENC; if (chan->device->device_prep_dma_memcpy) { if (ds) return -EINVAL; dc->ccr |= TXX9_DMA_CCR_XFSZ_X8; } else { if (!ds || (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg)) return -EINVAL; dc->ccr |= TXX9_DMA_CCR_EXTRQ | TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width)); txx9dmac_chan_set_INTENT(dc); } spin_lock_bh(&dc->lock); i = dc->descs_allocated; while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) { 
spin_unlock_bh(&dc->lock); desc = txx9dmac_desc_alloc(dc, GFP_KERNEL); if (!desc) { dev_info(chan2dev(chan), "only allocated %d descriptors\n", i); spin_lock_bh(&dc->lock); break; } txx9dmac_desc_put(dc, desc); spin_lock_bh(&dc->lock); i = ++dc->descs_allocated; } spin_unlock_bh(&dc->lock); dev_dbg(chan2dev(chan), "alloc_chan_resources allocated %d descriptors\n", i); return i; } static void txx9dmac_free_chan_resources(struct dma_chan *chan) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); struct txx9dmac_dev *ddev = dc->ddev; struct txx9dmac_desc *desc, *_desc; LIST_HEAD(list); dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", dc->descs_allocated); /* ASSERT: channel is idle */ BUG_ON(!list_empty(&dc->active_list)); BUG_ON(!list_empty(&dc->queue)); BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT); spin_lock_bh(&dc->lock); list_splice_init(&dc->free_list, &list); dc->descs_allocated = 0; spin_unlock_bh(&dc->lock); list_for_each_entry_safe(desc, _desc, &list, desc_node) { dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); dma_unmap_single(chan2parent(chan), desc->txd.phys, ddev->descsize, DMA_TO_DEVICE); kfree(desc); } dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); } /*----------------------------------------------------------------------*/ static void txx9dmac_off(struct txx9dmac_dev *ddev) { dma_writel(ddev, MCR, 0); mmiowb(); } static int __init txx9dmac_chan_probe(struct platform_device *pdev) { struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data; struct platform_device *dmac_dev = cpdata->dmac_dev; struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data; struct txx9dmac_chan *dc; int err; int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS; int irq; dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); if (!dc) return -ENOMEM; dc->dma.dev = &pdev->dev; dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; 
dc->dma.device_control = txx9dmac_control; dc->dma.device_tx_status = txx9dmac_tx_status; dc->dma.device_issue_pending = txx9dmac_issue_pending; if (pdata && pdata->memcpy_chan == ch) { dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy; dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask); } else { dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg; dma_cap_set(DMA_SLAVE, dc->dma.cap_mask); dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask); } INIT_LIST_HEAD(&dc->dma.channels); dc->ddev = platform_get_drvdata(dmac_dev); if (dc->ddev->irq < 0) { irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet, (unsigned long)dc); dc->irq = irq; err = devm_request_irq(&pdev->dev, dc->irq, txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc); if (err) return err; } else dc->irq = -1; dc->ddev->chan[ch] = dc; dc->chan.device = &dc->dma; list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); dma_cookie_init(&dc->chan); if (is_dmac64(dc)) dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; else dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch]; spin_lock_init(&dc->lock); INIT_LIST_HEAD(&dc->active_list); INIT_LIST_HEAD(&dc->queue); INIT_LIST_HEAD(&dc->free_list); txx9dmac_reset_chan(dc); platform_set_drvdata(pdev, dc); err = dma_async_device_register(&dc->dma); if (err) return err; dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n", dc->dma.dev_id, dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "", dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? 
" slave" : ""); return 0; } static int __exit txx9dmac_chan_remove(struct platform_device *pdev) { struct txx9dmac_chan *dc = platform_get_drvdata(pdev); dma_async_device_unregister(&dc->dma); if (dc->irq >= 0) tasklet_kill(&dc->tasklet); dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL; return 0; } static int __init txx9dmac_probe(struct platform_device *pdev) { struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; struct resource *io; struct txx9dmac_dev *ddev; u32 mcr; int err; io = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!io) return -EINVAL; ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL); if (!ddev) return -ENOMEM; if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), dev_name(&pdev->dev))) return -EBUSY; ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io)); if (!ddev->regs) return -ENOMEM; ddev->have_64bit_regs = pdata->have_64bit_regs; if (__is_dmac64(ddev)) ddev->descsize = sizeof(struct txx9dmac_hwdesc); else ddev->descsize = sizeof(struct txx9dmac_hwdesc32); /* force dma off, just in case */ txx9dmac_off(ddev); ddev->irq = platform_get_irq(pdev, 0); if (ddev->irq >= 0) { tasklet_init(&ddev->tasklet, txx9dmac_tasklet, (unsigned long)ddev); err = devm_request_irq(&pdev->dev, ddev->irq, txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev); if (err) return err; } mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; if (pdata && pdata->memcpy_chan >= 0) mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); dma_writel(ddev, MCR, mcr); platform_set_drvdata(pdev, ddev); return 0; } static int __exit txx9dmac_remove(struct platform_device *pdev) { struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); txx9dmac_off(ddev); if (ddev->irq >= 0) tasklet_kill(&ddev->tasklet); return 0; } static void txx9dmac_shutdown(struct platform_device *pdev) { struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); txx9dmac_off(ddev); } static int txx9dmac_suspend_noirq(struct device *dev) { struct platform_device *pdev = 
to_platform_device(dev); struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); txx9dmac_off(ddev); return 0; } static int txx9dmac_resume_noirq(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; u32 mcr; mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; if (pdata && pdata->memcpy_chan >= 0) mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); dma_writel(ddev, MCR, mcr); return 0; } static const struct dev_pm_ops txx9dmac_dev_pm_ops = { .suspend_noirq = txx9dmac_suspend_noirq, .resume_noirq = txx9dmac_resume_noirq, }; static struct platform_driver txx9dmac_chan_driver = { .remove = __exit_p(txx9dmac_chan_remove), .driver = { .name = "txx9dmac-chan", }, }; static struct platform_driver txx9dmac_driver = { .remove = __exit_p(txx9dmac_remove), .shutdown = txx9dmac_shutdown, .driver = { .name = "txx9dmac", .pm = &txx9dmac_dev_pm_ops, }, }; static int __init txx9dmac_init(void) { int rc; rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe); if (!rc) { rc = platform_driver_probe(&txx9dmac_chan_driver, txx9dmac_chan_probe); if (rc) platform_driver_unregister(&txx9dmac_driver); } return rc; } module_init(txx9dmac_init); static void __exit txx9dmac_exit(void) { platform_driver_unregister(&txx9dmac_chan_driver); platform_driver_unregister(&txx9dmac_driver); } module_exit(txx9dmac_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TXx9 DMA Controller driver"); MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); MODULE_ALIAS("platform:txx9dmac"); MODULE_ALIAS("platform:txx9dmac-chan");
gpl-2.0
hejiann/android_kernel_huawei_u8860
drivers/net/wireless/bcmdhd/dhd_linux_mon.c
517
10685
/* * Broadcom Dongle Host Driver (DHD), Linux monitor network interface * * Copyright (C) 1999-2011, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * * $Id: dhd_linux_mon.c,v 1.131.2.55 2011-02-09 05:31:56 Exp $ */ #include <linux/string.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/ieee80211.h> #include <linux/rtnetlink.h> #include <net/ieee80211_radiotap.h> #include <wlioctl.h> #include <bcmutils.h> #include <linux_osl.h> #include <dhd_dbg.h> #include <dngl_stats.h> #include <dhd.h> typedef enum monitor_states { MONITOR_STATE_DEINIT = 0x0, MONITOR_STATE_INIT = 0x1, MONITOR_STATE_INTERFACE_ADDED = 0x2, MONITOR_STATE_INTERFACE_DELETED = 0x4 } monitor_states_t; extern int dhd_start_xmit(struct sk_buff *skb, struct net_device *net); /** * Local declarations and defintions (not exposed) */ #define MON_PRINT(format, ...) 
printk("DHD-MON: %s " format, __func__, ##__VA_ARGS__) #define MON_TRACE MON_PRINT typedef struct monitor_interface { int radiotap_enabled; struct net_device* real_ndev; /* The real interface that the monitor is on */ struct net_device* mon_ndev; } monitor_interface; typedef struct dhd_linux_monitor { void *dhd_pub; monitor_states_t monitor_state; monitor_interface mon_if[DHD_MAX_IFS]; struct mutex lock; /* lock to protect mon_if */ } dhd_linux_monitor_t; static dhd_linux_monitor_t g_monitor; static struct net_device* lookup_real_netdev(char *name); static monitor_interface* ndev_to_monif(struct net_device *ndev); static int dhd_mon_if_open(struct net_device *ndev); static int dhd_mon_if_stop(struct net_device *ndev); static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev); static void dhd_mon_if_set_multicast_list(struct net_device *ndev); static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr); static const struct net_device_ops dhd_mon_if_ops = { .ndo_open = dhd_mon_if_open, .ndo_stop = dhd_mon_if_stop, .ndo_start_xmit = dhd_mon_if_subif_start_xmit, .ndo_set_multicast_list = dhd_mon_if_set_multicast_list, .ndo_set_mac_address = dhd_mon_if_change_mac, }; /** * Local static function defintions */ /* Look up dhd's net device table to find a match (e.g. 
interface "eth0" is a match for "mon.eth0" * "p2p-eth0-0" is a match for "mon.p2p-eth0-0") */ static struct net_device* lookup_real_netdev(char *name) { int i; int last_name_len = 0; struct net_device *ndev; struct net_device *ndev_found = NULL; /* We want to find interface "p2p-eth0-0" for monitor interface "mon.p2p-eth0-0", so * we skip "eth0" even if "mon.p2p-eth0-0" contains "eth0" */ for (i = 0; i < DHD_MAX_IFS; i++) { ndev = dhd_idx2net(g_monitor.dhd_pub, i); if (ndev && strstr(name, ndev->name)) { if (strlen(ndev->name) > last_name_len) { ndev_found = ndev; last_name_len = strlen(ndev->name); } } } return ndev_found; } static monitor_interface* ndev_to_monif(struct net_device *ndev) { int i; for (i = 0; i < DHD_MAX_IFS; i++) { if (g_monitor.mon_if[i].mon_ndev == ndev) return &g_monitor.mon_if[i]; } return NULL; } static int dhd_mon_if_open(struct net_device *ndev) { int ret = 0; MON_PRINT("enter\n"); return ret; } static int dhd_mon_if_stop(struct net_device *ndev) { int ret = 0; MON_PRINT("enter\n"); return ret; } static int dhd_mon_if_subif_start_xmit(struct sk_buff *skb, struct net_device *ndev) { int ret = 0; int rtap_len; int qos_len = 0; int dot11_hdr_len = 24; int snap_len = 6; unsigned char *pdata; unsigned short frame_ctl; unsigned char src_mac_addr[6]; unsigned char dst_mac_addr[6]; struct ieee80211_hdr *dot11_hdr; struct ieee80211_radiotap_header *rtap_hdr; monitor_interface* mon_if; MON_PRINT("enter\n"); mon_if = ndev_to_monif(ndev); if (mon_if == NULL || mon_if->real_ndev == NULL) { MON_PRINT(" cannot find matched net dev, skip the packet\n"); goto fail; } if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) goto fail; rtap_hdr = (struct ieee80211_radiotap_header *)skb->data; if (unlikely(rtap_hdr->it_version)) goto fail; rtap_len = ieee80211_get_radiotap_len(skb->data); if (unlikely(skb->len < rtap_len)) goto fail; MON_PRINT("radiotap len (should be 14): %d\n", rtap_len); /* Skip the ratio tap header */ skb_pull(skb, rtap_len); 
dot11_hdr = (struct ieee80211_hdr *)skb->data; frame_ctl = le16_to_cpu(dot11_hdr->frame_control); /* Check if the QoS bit is set */ if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { /* Check if this ia a Wireless Distribution System (WDS) frame * which has 4 MAC addresses */ if (dot11_hdr->frame_control & 0x0080) qos_len = 2; if ((dot11_hdr->frame_control & 0x0300) == 0x0300) dot11_hdr_len += 6; memcpy(dst_mac_addr, dot11_hdr->addr1, sizeof(dst_mac_addr)); memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr)); /* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for * for two MAC addresses */ skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2); pdata = (unsigned char*)skb->data; memcpy(pdata, dst_mac_addr, sizeof(dst_mac_addr)); memcpy(pdata + sizeof(dst_mac_addr), src_mac_addr, sizeof(src_mac_addr)); MON_PRINT("if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); /* Use the real net device to transmit the packet */ ret = dhd_start_xmit(skb, mon_if->real_ndev); return ret; } fail: dev_kfree_skb(skb); return 0; } static void dhd_mon_if_set_multicast_list(struct net_device *ndev) { monitor_interface* mon_if; mon_if = ndev_to_monif(ndev); if (mon_if == NULL || mon_if->real_ndev == NULL) { MON_PRINT(" cannot find matched net dev, skip the packet\n"); } MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); } static int dhd_mon_if_change_mac(struct net_device *ndev, void *addr) { int ret = 0; monitor_interface* mon_if; mon_if = ndev_to_monif(ndev); if (mon_if == NULL || mon_if->real_ndev == NULL) { MON_PRINT(" cannot find matched net dev, skip the packet\n"); } MON_PRINT("enter, if name: %s, matched if name %s\n", ndev->name, mon_if->real_ndev->name); return ret; } /** * Global function definitions (declared in dhd_linux_mon.h) */ int dhd_add_monitor(char *name, struct net_device **new_ndev) { int i; int idx = -1; int ret = 0; struct net_device* ndev = 
NULL; dhd_linux_monitor_t **dhd_mon; mutex_lock(&g_monitor.lock); MON_TRACE("enter, if name: %s\n", name); if (!name || !new_ndev) { MON_PRINT("invalid parameters\n"); ret = -EINVAL; goto out; } /* * Find a vacancy */ for (i = 0; i < DHD_MAX_IFS; i++) if (g_monitor.mon_if[i].mon_ndev == NULL) { idx = i; break; } if (idx == -1) { MON_PRINT("exceeds maximum interfaces\n"); ret = -EFAULT; goto out; } ndev = alloc_etherdev(sizeof(dhd_linux_monitor_t*)); if (!ndev) { MON_PRINT("failed to allocate memory\n"); ret = -ENOMEM; goto out; } ndev->type = ARPHRD_IEEE80211_RADIOTAP; strncpy(ndev->name, name, IFNAMSIZ); ndev->name[IFNAMSIZ - 1] = 0; ndev->netdev_ops = &dhd_mon_if_ops; ret = register_netdevice(ndev); if (ret) { MON_PRINT(" register_netdevice failed (%d)\n", ret); goto out; } *new_ndev = ndev; g_monitor.mon_if[idx].radiotap_enabled = TRUE; g_monitor.mon_if[idx].mon_ndev = ndev; g_monitor.mon_if[idx].real_ndev = lookup_real_netdev(name); dhd_mon = (dhd_linux_monitor_t **)netdev_priv(ndev); *dhd_mon = &g_monitor; g_monitor.monitor_state = MONITOR_STATE_INTERFACE_ADDED; MON_PRINT("net device returned: 0x%p\n", ndev); MON_PRINT("found a matched net device, name %s\n", g_monitor.mon_if[idx].real_ndev->name); out: if (ret && ndev) free_netdev(ndev); mutex_unlock(&g_monitor.lock); return ret; } int dhd_del_monitor(struct net_device *ndev) { int i; bool rollback_lock = false; if (!ndev) return -EINVAL; mutex_lock(&g_monitor.lock); for (i = 0; i < DHD_MAX_IFS; i++) { if (g_monitor.mon_if[i].mon_ndev == ndev || g_monitor.mon_if[i].real_ndev == ndev) { g_monitor.mon_if[i].real_ndev = NULL; if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = true; } unregister_netdev(g_monitor.mon_if[i].mon_ndev); free_netdev(g_monitor.mon_if[i].mon_ndev); g_monitor.mon_if[i].mon_ndev = NULL; g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED; break; } } if (rollback_lock) { rtnl_lock(); rollback_lock = false; } if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED) 
MON_PRINT("interface not found in monitor IF array, is this a monitor IF? 0x%p\n", ndev); mutex_unlock(&g_monitor.lock); return 0; } int dhd_monitor_init(void *dhd_pub) { if (g_monitor.monitor_state == MONITOR_STATE_DEINIT) { g_monitor.dhd_pub = dhd_pub; mutex_init(&g_monitor.lock); g_monitor.monitor_state = MONITOR_STATE_INIT; } return 0; } int dhd_monitor_uninit(void) { int i; struct net_device *ndev; bool rollback_lock = false; mutex_lock(&g_monitor.lock); if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) { for (i = 0; i < DHD_MAX_IFS; i++) { ndev = g_monitor.mon_if[i].mon_ndev; if (ndev) { if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = true; } unregister_netdev(ndev); free_netdev(ndev); g_monitor.mon_if[i].real_ndev = NULL; g_monitor.mon_if[i].mon_ndev = NULL; if (rollback_lock) { rtnl_lock(); rollback_lock = false; } } } g_monitor.monitor_state = MONITOR_STATE_DEINIT; } mutex_unlock(&g_monitor.lock); return 0; }
gpl-2.0
aatjitra/PR26
drivers/staging/iio/addac/adt7316.c
2309
58746
/* * ADT7316 digital temperature sensor driver supporting ADT7316/7/8 ADT7516/7/9 * * * Copyright 2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/workqueue.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/i2c.h> #include <linux/rtc.h> #include "../iio.h" #include "../sysfs.h" #include "adt7316.h" /* * ADT7316 registers definition */ #define ADT7316_INT_STAT1 0x0 #define ADT7316_INT_STAT2 0x1 #define ADT7316_LSB_IN_TEMP_VDD 0x3 #define ADT7316_LSB_IN_TEMP_MASK 0x3 #define ADT7316_LSB_VDD_MASK 0xC #define ADT7316_LSB_VDD_OFFSET 2 #define ADT7316_LSB_EX_TEMP_AIN 0x4 #define ADT7316_LSB_EX_TEMP_MASK 0x3 #define ADT7516_LSB_AIN_SHIFT 2 #define ADT7316_AD_MSB_DATA_BASE 0x6 #define ADT7316_AD_MSB_DATA_REGS 3 #define ADT7516_AD_MSB_DATA_REGS 6 #define ADT7316_MSB_VDD 0x6 #define ADT7316_MSB_IN_TEMP 0x7 #define ADT7316_MSB_EX_TEMP 0x8 #define ADT7516_MSB_AIN1 0x8 #define ADT7516_MSB_AIN2 0x9 #define ADT7516_MSB_AIN3 0xA #define ADT7516_MSB_AIN4 0xB #define ADT7316_DA_DATA_BASE 0x10 #define ADT7316_DA_MSB_DATA_REGS 4 #define ADT7316_LSB_DAC_A 0x10 #define ADT7316_MSB_DAC_A 0x11 #define ADT7316_LSB_DAC_B 0x12 #define ADT7316_MSB_DAC_B 0x13 #define ADT7316_LSB_DAC_C 0x14 #define ADT7316_MSB_DAC_C 0x15 #define ADT7316_LSB_DAC_D 0x16 #define ADT7316_MSB_DAC_D 0x17 #define ADT7316_CONFIG1 0x18 #define ADT7316_CONFIG2 0x19 #define ADT7316_CONFIG3 0x1A #define ADT7316_LDAC_CONFIG 0x1B #define ADT7316_DAC_CONFIG 0x1C #define ADT7316_INT_MASK1 0x1D #define ADT7316_INT_MASK2 0x1E #define ADT7316_IN_TEMP_OFFSET 0x1F #define ADT7316_EX_TEMP_OFFSET 0x20 #define ADT7316_IN_ANALOG_TEMP_OFFSET 0x21 #define ADT7316_EX_ANALOG_TEMP_OFFSET 0x22 #define ADT7316_VDD_HIGH 0x23 #define ADT7316_VDD_LOW 0x24 #define ADT7316_IN_TEMP_HIGH 0x25 #define ADT7316_IN_TEMP_LOW 0x26 #define ADT7316_EX_TEMP_HIGH 0x27 #define 
ADT7316_EX_TEMP_LOW 0x28 #define ADT7516_AIN2_HIGH 0x2B #define ADT7516_AIN2_LOW 0x2C #define ADT7516_AIN3_HIGH 0x2D #define ADT7516_AIN3_LOW 0x2E #define ADT7516_AIN4_HIGH 0x2F #define ADT7516_AIN4_LOW 0x30 #define ADT7316_DEVICE_ID 0x4D #define ADT7316_MANUFACTURE_ID 0x4E #define ADT7316_DEVICE_REV 0x4F #define ADT7316_SPI_LOCK_STAT 0x7F /* * ADT7316 config1 */ #define ADT7316_EN 0x1 #define ADT7516_SEL_EX_TEMP 0x4 #define ADT7516_SEL_AIN1_2_EX_TEMP_MASK 0x6 #define ADT7516_SEL_AIN3 0x8 #define ADT7316_INT_EN 0x20 #define ADT7316_INT_POLARITY 0x40 #define ADT7316_PD 0x80 /* * ADT7316 config2 */ #define ADT7316_AD_SINGLE_CH_MASK 0x3 #define ADT7516_AD_SINGLE_CH_MASK 0x7 #define ADT7316_AD_SINGLE_CH_VDD 0 #define ADT7316_AD_SINGLE_CH_IN 1 #define ADT7316_AD_SINGLE_CH_EX 2 #define ADT7516_AD_SINGLE_CH_AIN1 2 #define ADT7516_AD_SINGLE_CH_AIN2 3 #define ADT7516_AD_SINGLE_CH_AIN3 4 #define ADT7516_AD_SINGLE_CH_AIN4 5 #define ADT7316_AD_SINGLE_CH_MODE 0x10 #define ADT7316_DISABLE_AVERAGING 0x20 #define ADT7316_EN_SMBUS_TIMEOUT 0x40 #define ADT7316_RESET 0x80 /* * ADT7316 config3 */ #define ADT7316_ADCLK_22_5 0x1 #define ADT7316_DA_HIGH_RESOLUTION 0x2 #define ADT7316_DA_EN_VIA_DAC_LDCA 0x4 #define ADT7516_AIN_IN_VREF 0x10 #define ADT7316_EN_IN_TEMP_PROP_DACA 0x20 #define ADT7316_EN_EX_TEMP_PROP_DACB 0x40 /* * ADT7316 DAC config */ #define ADT7316_DA_2VREF_CH_MASK 0xF #define ADT7316_DA_EN_MODE_MASK 0x30 #define ADT7316_DA_EN_MODE_SINGLE 0x00 #define ADT7316_DA_EN_MODE_AB_CD 0x10 #define ADT7316_DA_EN_MODE_ABCD 0x20 #define ADT7316_DA_EN_MODE_LDAC 0x30 #define ADT7316_VREF_BYPASS_DAC_AB 0x40 #define ADT7316_VREF_BYPASS_DAC_CD 0x80 /* * ADT7316 LDAC config */ #define ADT7316_LDAC_EN_DA_MASK 0xF #define ADT7316_DAC_IN_VREF 0x10 #define ADT7516_DAC_AB_IN_VREF 0x10 #define ADT7516_DAC_CD_IN_VREF 0x20 #define ADT7516_DAC_IN_VREF_OFFSET 4 #define ADT7516_DAC_IN_VREF_MASK 0x30 /* * ADT7316 INT_MASK2 */ #define ADT7316_INT_MASK2_VDD 0x10 /* * ADT7316 value masks */ #define 
ADT7316_VALUE_MASK 0xfff #define ADT7316_T_VALUE_SIGN 0x400 #define ADT7316_T_VALUE_FLOAT_OFFSET 2 #define ADT7316_T_VALUE_FLOAT_MASK 0x2 /* * Chip ID */ #define ID_ADT7316 0x1 #define ID_ADT7317 0x2 #define ID_ADT7318 0x3 #define ID_ADT7516 0x11 #define ID_ADT7517 0x12 #define ID_ADT7519 0x14 #define ID_FAMILY_MASK 0xF0 #define ID_ADT73XX 0x0 #define ID_ADT75XX 0x10 /* * struct adt7316_chip_info - chip specifc information */ struct adt7316_chip_info { struct iio_dev *indio_dev; struct adt7316_bus bus; u16 ldac_pin; u16 int_mask; /* 0x2f */ u8 config1; u8 config2; u8 config3; u8 dac_config; /* DAC config */ u8 ldac_config; /* LDAC config */ u8 dac_bits; /* 8, 10, 12 */ u8 id; /* chip id */ }; /* * Logic interrupt mask for user application to enable * interrupts. */ #define ADT7316_IN_TEMP_HIGH_INT_MASK 0x1 #define ADT7316_IN_TEMP_LOW_INT_MASK 0x2 #define ADT7316_EX_TEMP_HIGH_INT_MASK 0x4 #define ADT7316_EX_TEMP_LOW_INT_MASK 0x8 #define ADT7316_EX_TEMP_FAULT_INT_MASK 0x10 #define ADT7516_AIN1_INT_MASK 0x4 #define ADT7516_AIN2_INT_MASK 0x20 #define ADT7516_AIN3_INT_MASK 0x40 #define ADT7516_AIN4_INT_MASK 0x80 #define ADT7316_VDD_INT_MASK 0x100 #define ADT7316_TEMP_INT_MASK 0x1F #define ADT7516_AIN_INT_MASK 0xE0 #define ADT7316_TEMP_AIN_INT_MASK \ (ADT7316_TEMP_INT_MASK | ADT7316_TEMP_INT_MASK) /* * struct adt7316_chip_info - chip specifc information */ struct adt7316_limit_regs { u16 data_high; u16 data_low; }; static ssize_t adt7316_show_enabled(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_EN)); } static ssize_t _adt7316_store_enabled(struct adt7316_chip_info *chip, int enable) { u8 config1; int ret; if (enable) config1 = chip->config1 | ADT7316_EN; else config1 = chip->config1 & ~ADT7316_EN; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1); if (ret) return -EIO; chip->config1 
= config1; return ret; } static ssize_t adt7316_store_enabled(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; int enable; if (!memcmp(buf, "1", 1)) enable = 1; else enable = 0; if (_adt7316_store_enabled(chip, enable) < 0) return -EIO; else return len; } static IIO_DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, adt7316_show_enabled, adt7316_store_enabled, 0); static ssize_t adt7316_show_select_ex_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) return -EPERM; return sprintf(buf, "%d\n", !!(chip->config1 & ADT7516_SEL_EX_TEMP)); } static ssize_t adt7316_store_select_ex_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config1; int ret; if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) return -EPERM; config1 = chip->config1 & (~ADT7516_SEL_EX_TEMP); if (!memcmp(buf, "1", 1)) config1 |= ADT7516_SEL_EX_TEMP; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1); if (ret) return -EIO; chip->config1 = config1; return len; } static IIO_DEVICE_ATTR(select_ex_temp, S_IRUGO | S_IWUSR, adt7316_show_select_ex_temp, adt7316_store_select_ex_temp, 0); static ssize_t adt7316_show_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (chip->config2 & ADT7316_AD_SINGLE_CH_MODE) return sprintf(buf, "single_channel\n"); else return sprintf(buf, "round_robin\n"); } static ssize_t adt7316_store_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); 
struct adt7316_chip_info *chip = dev_info->dev_data; u8 config2; int ret; config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MODE); if (!memcmp(buf, "single_channel", 14)) config2 |= ADT7316_AD_SINGLE_CH_MODE; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2); if (ret) return -EIO; chip->config2 = config2; return len; } static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, adt7316_show_mode, adt7316_store_mode, 0); static ssize_t adt7316_show_all_modes(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "single_channel\nround_robin\n"); } static IIO_DEVICE_ATTR(all_modes, S_IRUGO, adt7316_show_all_modes, NULL, 0); static ssize_t adt7316_show_ad_channel(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE)) return -EPERM; switch (chip->config2 & ADT7516_AD_SINGLE_CH_MASK) { case ADT7316_AD_SINGLE_CH_VDD: return sprintf(buf, "0 - VDD\n"); case ADT7316_AD_SINGLE_CH_IN: return sprintf(buf, "1 - Internal Temperature\n"); case ADT7316_AD_SINGLE_CH_EX: if (((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) && (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) return sprintf(buf, "2 - AIN1\n"); else return sprintf(buf, "2 - External Temperature\n"); case ADT7516_AD_SINGLE_CH_AIN2: if ((chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) return sprintf(buf, "3 - AIN2\n"); else return sprintf(buf, "N/A\n"); case ADT7516_AD_SINGLE_CH_AIN3: if (chip->config1 & ADT7516_SEL_AIN3) return sprintf(buf, "4 - AIN3\n"); else return sprintf(buf, "N/A\n"); case ADT7516_AD_SINGLE_CH_AIN4: return sprintf(buf, "5 - AIN4\n"); default: return sprintf(buf, "N/A\n"); } } static ssize_t adt7316_store_ad_channel(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 
config2; unsigned long data = 0; int ret; if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE)) return -EPERM; ret = strict_strtoul(buf, 10, &data); if (ret) return -EINVAL; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) { if (data > 5) return -EINVAL; config2 = chip->config2 & (~ADT7516_AD_SINGLE_CH_MASK); } else { if (data > 2) return -EINVAL; config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MASK); } config2 |= data; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2); if (ret) return -EIO; chip->config2 = config2; return len; } static IIO_DEVICE_ATTR(ad_channel, S_IRUGO | S_IWUSR, adt7316_show_ad_channel, adt7316_store_ad_channel, 0); static ssize_t adt7316_show_all_ad_channels(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE)) return -EPERM; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n" "2 - External Temperature or AIN1\n" "3 - AIN2\n4 - AIN3\n5 - AIN4\n"); else return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n" "2 - External Temperature\n"); } static IIO_DEVICE_ATTR(all_ad_channels, S_IRUGO, adt7316_show_all_ad_channels, NULL, 0); static ssize_t adt7316_show_disable_averaging(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config2 & ADT7316_DISABLE_AVERAGING)); } static ssize_t adt7316_store_disable_averaging(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config2; int ret; config2 = chip->config2 & (~ADT7316_DISABLE_AVERAGING); if (!memcmp(buf, "1", 1)) config2 |= ADT7316_DISABLE_AVERAGING; ret = chip->bus.write(chip->bus.client, 
ADT7316_CONFIG2, config2); if (ret) return -EIO; chip->config2 = config2; return len; } static IIO_DEVICE_ATTR(disable_averaging, S_IRUGO | S_IWUSR, adt7316_show_disable_averaging, adt7316_store_disable_averaging, 0); static ssize_t adt7316_show_enable_smbus_timeout(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config2 & ADT7316_EN_SMBUS_TIMEOUT)); } static ssize_t adt7316_store_enable_smbus_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config2; int ret; config2 = chip->config2 & (~ADT7316_EN_SMBUS_TIMEOUT); if (!memcmp(buf, "1", 1)) config2 |= ADT7316_EN_SMBUS_TIMEOUT; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2); if (ret) return -EIO; chip->config2 = config2; return len; } static IIO_DEVICE_ATTR(enable_smbus_timeout, S_IRUGO | S_IWUSR, adt7316_show_enable_smbus_timeout, adt7316_store_enable_smbus_timeout, 0); static ssize_t adt7316_store_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config2; int ret; config2 = chip->config2 | ADT7316_RESET; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2); if (ret) return -EIO; return len; } static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adt7316_store_reset, 0); static ssize_t adt7316_show_powerdown(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_PD)); } static ssize_t adt7316_store_powerdown(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct 
iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config1; int ret; config1 = chip->config1 & (~ADT7316_PD); if (!memcmp(buf, "1", 1)) config1 |= ADT7316_PD; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1); if (ret) return -EIO; chip->config1 = config1; return len; } static IIO_DEVICE_ATTR(powerdown, S_IRUGO | S_IWUSR, adt7316_show_powerdown, adt7316_store_powerdown, 0); static ssize_t adt7316_show_fast_ad_clock(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_ADCLK_22_5)); } static ssize_t adt7316_store_fast_ad_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; config3 = chip->config3 & (~ADT7316_ADCLK_22_5); if (!memcmp(buf, "1", 1)) config3 |= ADT7316_ADCLK_22_5; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(fast_ad_clock, S_IRUGO | S_IWUSR, adt7316_show_fast_ad_clock, adt7316_store_fast_ad_clock, 0); static ssize_t adt7316_show_da_high_resolution(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) { if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516) return sprintf(buf, "1 (12 bits)\n"); else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517) return sprintf(buf, "1 (10 bits)\n"); } return sprintf(buf, "0 (8 bits)\n"); } static ssize_t adt7316_store_da_high_resolution(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct 
adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; chip->dac_bits = 8; if (!memcmp(buf, "1", 1)) { config3 = chip->config3 | ADT7316_DA_HIGH_RESOLUTION; if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516) chip->dac_bits = 12; else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517) chip->dac_bits = 10; } else config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION); ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(da_high_resolution, S_IRUGO | S_IWUSR, adt7316_show_da_high_resolution, adt7316_store_da_high_resolution, 0); static ssize_t adt7316_show_AIN_internal_Vref(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) return -EPERM; return sprintf(buf, "%d\n", !!(chip->config3 & ADT7516_AIN_IN_VREF)); } static ssize_t adt7316_store_AIN_internal_Vref(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) return -EPERM; if (memcmp(buf, "1", 1)) config3 = chip->config3 & (~ADT7516_AIN_IN_VREF); else config3 = chip->config3 | ADT7516_AIN_IN_VREF; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(AIN_internal_Vref, S_IRUGO | S_IWUSR, adt7316_show_AIN_internal_Vref, adt7316_store_AIN_internal_Vref, 0); static ssize_t adt7316_show_enable_prop_DACA(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)); } static 
ssize_t adt7316_store_enable_prop_DACA(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; config3 = chip->config3 & (~ADT7316_EN_IN_TEMP_PROP_DACA); if (!memcmp(buf, "1", 1)) config3 |= ADT7316_EN_IN_TEMP_PROP_DACA; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(enable_proportion_DACA, S_IRUGO | S_IWUSR, adt7316_show_enable_prop_DACA, adt7316_store_enable_prop_DACA, 0); static ssize_t adt7316_show_enable_prop_DACB(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)); } static ssize_t adt7316_store_enable_prop_DACB(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; config3 = chip->config3 & (~ADT7316_EN_EX_TEMP_PROP_DACB); if (!memcmp(buf, "1", 1)) config3 |= ADT7316_EN_EX_TEMP_PROP_DACB; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(enable_proportion_DACB, S_IRUGO | S_IWUSR, adt7316_show_enable_prop_DACB, adt7316_store_enable_prop_DACB, 0); static ssize_t adt7316_show_DAC_2Vref_ch_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "0x%x\n", chip->dac_config & ADT7316_DA_2VREF_CH_MASK); } static ssize_t adt7316_store_DAC_2Vref_ch_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev 
*dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 dac_config; unsigned long data = 0; int ret; ret = strict_strtoul(buf, 16, &data); if (ret || data > ADT7316_DA_2VREF_CH_MASK) return -EINVAL; dac_config = chip->dac_config & (~ADT7316_DA_2VREF_CH_MASK); dac_config |= data; ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config); if (ret) return -EIO; chip->dac_config = dac_config; return len; } static IIO_DEVICE_ATTR(DAC_2Vref_channels_mask, S_IRUGO | S_IWUSR, adt7316_show_DAC_2Vref_ch_mask, adt7316_store_DAC_2Vref_ch_mask, 0); static ssize_t adt7316_show_DAC_update_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA)) return sprintf(buf, "manual\n"); else { switch (chip->dac_config & ADT7316_DA_EN_MODE_MASK) { case ADT7316_DA_EN_MODE_SINGLE: return sprintf(buf, "0 - auto at any MSB DAC writing\n"); case ADT7316_DA_EN_MODE_AB_CD: return sprintf(buf, "1 - auto at MSB DAC AB and CD writing\n"); case ADT7316_DA_EN_MODE_ABCD: return sprintf(buf, "2 - auto at MSB DAC ABCD writing\n"); default: /* ADT7316_DA_EN_MODE_LDAC */ return sprintf(buf, "3 - manual\n"); } } } static ssize_t adt7316_store_DAC_update_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 dac_config; unsigned long data; int ret; if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA)) return -EPERM; ret = strict_strtoul(buf, 10, &data); if (ret || data > ADT7316_DA_EN_MODE_MASK) return -EINVAL; dac_config = chip->dac_config & (~ADT7316_DA_EN_MODE_MASK); dac_config |= data; ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config); if (ret) return -EIO; chip->dac_config = dac_config; return len; } static IIO_DEVICE_ATTR(DAC_update_mode, 
S_IRUGO | S_IWUSR, adt7316_show_DAC_update_mode, adt7316_store_DAC_update_mode, 0); static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA) return sprintf(buf, "0 - auto at any MSB DAC writing\n" "1 - auto at MSB DAC AB and CD writing\n" "2 - auto at MSB DAC ABCD writing\n" "3 - manual\n"); else return sprintf(buf, "manual\n"); } static IIO_DEVICE_ATTR(all_DAC_update_modes, S_IRUGO, adt7316_show_all_DAC_update_modes, NULL, 0); static ssize_t adt7316_store_update_DAC(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 ldac_config; unsigned long data; int ret; if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA) { if ((chip->dac_config & ADT7316_DA_EN_MODE_MASK) != ADT7316_DA_EN_MODE_LDAC) return -EPERM; ret = strict_strtoul(buf, 16, &data); if (ret || data > ADT7316_LDAC_EN_DA_MASK) return -EINVAL; ldac_config = chip->ldac_config & (~ADT7316_LDAC_EN_DA_MASK); ldac_config |= data; ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG, ldac_config); if (ret) return -EIO; } else { gpio_set_value(chip->ldac_pin, 0); gpio_set_value(chip->ldac_pin, 1); } return len; } static IIO_DEVICE_ATTR(update_DAC, S_IRUGO | S_IWUSR, NULL, adt7316_store_update_DAC, 0); static ssize_t adt7316_show_DA_AB_Vref_bypass(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return -EPERM; return sprintf(buf, "%d\n", !!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_AB)); } static ssize_t adt7316_store_DA_AB_Vref_bypass(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { 
struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 dac_config; int ret; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return -EPERM; dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_AB); if (!memcmp(buf, "1", 1)) dac_config |= ADT7316_VREF_BYPASS_DAC_AB; ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config); if (ret) return -EIO; chip->dac_config = dac_config; return len; } static IIO_DEVICE_ATTR(DA_AB_Vref_bypass, S_IRUGO | S_IWUSR, adt7316_show_DA_AB_Vref_bypass, adt7316_store_DA_AB_Vref_bypass, 0); static ssize_t adt7316_show_DA_CD_Vref_bypass(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return -EPERM; return sprintf(buf, "%d\n", !!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_CD)); } static ssize_t adt7316_store_DA_CD_Vref_bypass(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 dac_config; int ret; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return -EPERM; dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_CD); if (!memcmp(buf, "1", 1)) dac_config |= ADT7316_VREF_BYPASS_DAC_CD; ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config); if (ret) return -EIO; chip->dac_config = dac_config; return len; } static IIO_DEVICE_ATTR(DA_CD_Vref_bypass, S_IRUGO | S_IWUSR, adt7316_show_DA_CD_Vref_bypass, adt7316_store_DA_CD_Vref_bypass, 0); static ssize_t adt7316_show_DAC_internal_Vref(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return sprintf(buf, "0x%x\n", (chip->dac_config & ADT7516_DAC_IN_VREF_MASK) 
>> ADT7516_DAC_IN_VREF_OFFSET); else return sprintf(buf, "%d\n", !!(chip->dac_config & ADT7316_DAC_IN_VREF)); } static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 ldac_config; unsigned long data; int ret; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) { ret = strict_strtoul(buf, 16, &data); if (ret || data > 3) return -EINVAL; ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK); if (data & 0x1) ldac_config |= ADT7516_DAC_AB_IN_VREF; else if (data & 0x2) ldac_config |= ADT7516_DAC_CD_IN_VREF; } else { ret = strict_strtoul(buf, 16, &data); if (ret) return -EINVAL; ldac_config = chip->ldac_config & (~ADT7316_DAC_IN_VREF); if (data) ldac_config = chip->ldac_config | ADT7316_DAC_IN_VREF; } ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG, ldac_config); if (ret) return -EIO; chip->ldac_config = ldac_config; return len; } static IIO_DEVICE_ATTR(DAC_internal_Vref, S_IRUGO | S_IWUSR, adt7316_show_DAC_internal_Vref, adt7316_store_DAC_internal_Vref, 0); static ssize_t adt7316_show_ad(struct adt7316_chip_info *chip, int channel, char *buf) { u16 data; u8 msb, lsb; char sign = ' '; int ret; if ((chip->config2 & ADT7316_AD_SINGLE_CH_MODE) && channel != (chip->config2 & ADT7516_AD_SINGLE_CH_MASK)) return -EPERM; switch (channel) { case ADT7316_AD_SINGLE_CH_IN: ret = chip->bus.read(chip->bus.client, ADT7316_LSB_IN_TEMP_VDD, &lsb); if (ret) return -EIO; ret = chip->bus.read(chip->bus.client, ADT7316_AD_MSB_DATA_BASE + channel, &msb); if (ret) return -EIO; data = msb << ADT7316_T_VALUE_FLOAT_OFFSET; data |= lsb & ADT7316_LSB_IN_TEMP_MASK; break; case ADT7316_AD_SINGLE_CH_VDD: ret = chip->bus.read(chip->bus.client, ADT7316_LSB_IN_TEMP_VDD, &lsb); if (ret) return -EIO; ret = chip->bus.read(chip->bus.client, ADT7316_AD_MSB_DATA_BASE + channel, &msb); if (ret) return -EIO; data = msb << 
ADT7316_T_VALUE_FLOAT_OFFSET; data |= (lsb & ADT7316_LSB_VDD_MASK) >> ADT7316_LSB_VDD_OFFSET; return sprintf(buf, "%d\n", data); default: /* ex_temp and ain */ ret = chip->bus.read(chip->bus.client, ADT7316_LSB_EX_TEMP_AIN, &lsb); if (ret) return -EIO; ret = chip->bus.read(chip->bus.client, ADT7316_AD_MSB_DATA_BASE + channel, &msb); if (ret) return -EIO; data = msb << ADT7316_T_VALUE_FLOAT_OFFSET; data |= lsb & (ADT7316_LSB_EX_TEMP_MASK << (ADT7516_LSB_AIN_SHIFT * (channel - (ADT7316_MSB_EX_TEMP - ADT7316_AD_MSB_DATA_BASE)))); if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return sprintf(buf, "%d\n", data); else break; } if (data & ADT7316_T_VALUE_SIGN) { /* convert supplement to positive value */ data = (ADT7316_T_VALUE_SIGN << 1) - data; sign = '-'; } return sprintf(buf, "%c%d.%.2d\n", sign, (data >> ADT7316_T_VALUE_FLOAT_OFFSET), (data & ADT7316_T_VALUE_FLOAT_MASK) * 25); } static ssize_t adt7316_show_VDD(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_VDD, buf); } static IIO_DEVICE_ATTR(VDD, S_IRUGO, adt7316_show_VDD, NULL, 0); static ssize_t adt7316_show_in_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_IN, buf); } static IIO_DEVICE_ATTR(in_temp, S_IRUGO, adt7316_show_in_temp, NULL, 0); static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_EX, buf); } static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0); static IIO_DEVICE_ATTR(ex_temp, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0); static ssize_t 
adt7316_show_AIN2(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN2, buf); } static IIO_DEVICE_ATTR(AIN2, S_IRUGO, adt7316_show_AIN2, NULL, 0); static ssize_t adt7316_show_AIN3(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN3, buf); } static IIO_DEVICE_ATTR(AIN3, S_IRUGO, adt7316_show_AIN3, NULL, 0); static ssize_t adt7316_show_AIN4(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN4, buf); } static IIO_DEVICE_ATTR(AIN4, S_IRUGO, adt7316_show_AIN4, NULL, 0); static ssize_t adt7316_show_temp_offset(struct adt7316_chip_info *chip, int offset_addr, char *buf) { int data; u8 val; int ret; ret = chip->bus.read(chip->bus.client, offset_addr, &val); if (ret) return -EIO; data = (int)val; if (val & 0x80) data -= 256; return sprintf(buf, "%d\n", data); } static ssize_t adt7316_store_temp_offset(struct adt7316_chip_info *chip, int offset_addr, const char *buf, size_t len) { long data; u8 val; int ret; ret = strict_strtol(buf, 10, &data); if (ret || data > 127 || data < -128) return -EINVAL; if (data < 0) data += 256; val = (u8)data; ret = chip->bus.write(chip->bus.client, offset_addr, val); if (ret) return -EIO; return len; } static ssize_t adt7316_show_in_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf); } static ssize_t adt7316_store_in_temp_offset(struct device *dev, struct 
device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf, len); } static IIO_DEVICE_ATTR(in_temp_offset, S_IRUGO | S_IWUSR, adt7316_show_in_temp_offset, adt7316_store_in_temp_offset, 0); static ssize_t adt7316_show_ex_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf); } static ssize_t adt7316_store_ex_temp_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf, len); } static IIO_DEVICE_ATTR(ex_temp_offset, S_IRUGO | S_IWUSR, adt7316_show_ex_temp_offset, adt7316_store_ex_temp_offset, 0); static ssize_t adt7316_show_in_analog_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_temp_offset(chip, ADT7316_IN_ANALOG_TEMP_OFFSET, buf); } static ssize_t adt7316_store_in_analog_temp_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_temp_offset(chip, ADT7316_IN_ANALOG_TEMP_OFFSET, buf, len); } static IIO_DEVICE_ATTR(in_analog_temp_offset, S_IRUGO | S_IWUSR, adt7316_show_in_analog_temp_offset, adt7316_store_in_analog_temp_offset, 0); static ssize_t adt7316_show_ex_analog_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info 
*chip = dev_info->dev_data; return adt7316_show_temp_offset(chip, ADT7316_EX_ANALOG_TEMP_OFFSET, buf); } static ssize_t adt7316_store_ex_analog_temp_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_temp_offset(chip, ADT7316_EX_ANALOG_TEMP_OFFSET, buf, len); } static IIO_DEVICE_ATTR(ex_analog_temp_offset, S_IRUGO | S_IWUSR, adt7316_show_ex_analog_temp_offset, adt7316_store_ex_analog_temp_offset, 0); static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip, int channel, char *buf) { u16 data; u8 msb, lsb, offset; int ret; if (channel >= ADT7316_DA_MSB_DATA_REGS || (channel == 0 && (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) || (channel == 1 && (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB))) return -EPERM; offset = chip->dac_bits - 8; if (chip->dac_bits > 8) { ret = chip->bus.read(chip->bus.client, ADT7316_DA_DATA_BASE + channel * 2, &lsb); if (ret) return -EIO; } ret = chip->bus.read(chip->bus.client, ADT7316_DA_DATA_BASE + 1 + channel * 2, &msb); if (ret) return -EIO; data = (msb << offset) + (lsb & ((1 << offset) - 1)); return sprintf(buf, "%d\n", data); } static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip, int channel, const char *buf, size_t len) { u8 msb, lsb, offset; unsigned long data; int ret; if (channel >= ADT7316_DA_MSB_DATA_REGS || (channel == 0 && (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) || (channel == 1 && (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB))) return -EPERM; offset = chip->dac_bits - 8; ret = strict_strtoul(buf, 10, &data); if (ret || data >= (1 << chip->dac_bits)) return -EINVAL; if (chip->dac_bits > 8) { lsb = data & (1 << offset); ret = chip->bus.write(chip->bus.client, ADT7316_DA_DATA_BASE + channel * 2, lsb); if (ret) return -EIO; } msb = data >> offset; ret = chip->bus.write(chip->bus.client, ADT7316_DA_DATA_BASE + 1 + channel * 2, msb); if (ret) 
return -EIO; return len; } static ssize_t adt7316_show_DAC_A(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_DAC(chip, 0, buf); } static ssize_t adt7316_store_DAC_A(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_DAC(chip, 0, buf, len); } static IIO_DEVICE_ATTR(DAC_A, S_IRUGO | S_IWUSR, adt7316_show_DAC_A, adt7316_store_DAC_A, 0); static ssize_t adt7316_show_DAC_B(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_DAC(chip, 1, buf); } static ssize_t adt7316_store_DAC_B(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_DAC(chip, 1, buf, len); } static IIO_DEVICE_ATTR(DAC_B, S_IRUGO | S_IWUSR, adt7316_show_DAC_B, adt7316_store_DAC_B, 0); static ssize_t adt7316_show_DAC_C(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_DAC(chip, 2, buf); } static ssize_t adt7316_store_DAC_C(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_DAC(chip, 2, buf, len); } static IIO_DEVICE_ATTR(DAC_C, S_IRUGO | S_IWUSR, adt7316_show_DAC_C, adt7316_store_DAC_C, 0); static ssize_t adt7316_show_DAC_D(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct 
adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_DAC(chip, 3, buf); } static ssize_t adt7316_store_DAC_D(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_DAC(chip, 3, buf, len); } static IIO_DEVICE_ATTR(DAC_D, S_IRUGO | S_IWUSR, adt7316_show_DAC_D, adt7316_store_DAC_D, 0); static ssize_t adt7316_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 id; int ret; ret = chip->bus.read(chip->bus.client, ADT7316_DEVICE_ID, &id); if (ret) return -EIO; return sprintf(buf, "%d\n", id); } static IIO_DEVICE_ATTR(device_id, S_IRUGO, adt7316_show_device_id, NULL, 0); static ssize_t adt7316_show_manufactorer_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 id; int ret; ret = chip->bus.read(chip->bus.client, ADT7316_MANUFACTURE_ID, &id); if (ret) return -EIO; return sprintf(buf, "%d\n", id); } static IIO_DEVICE_ATTR(manufactorer_id, S_IRUGO, adt7316_show_manufactorer_id, NULL, 0); static ssize_t adt7316_show_device_rev(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 rev; int ret; ret = chip->bus.read(chip->bus.client, ADT7316_DEVICE_REV, &rev); if (ret) return -EIO; return sprintf(buf, "%d\n", rev); } static IIO_DEVICE_ATTR(device_rev, S_IRUGO, adt7316_show_device_rev, NULL, 0); static ssize_t adt7316_show_bus_type(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 stat; int ret; ret = chip->bus.read(chip->bus.client, 
ADT7316_SPI_LOCK_STAT, &stat); if (ret) return -EIO; if (stat) return sprintf(buf, "spi\n"); else return sprintf(buf, "i2c\n"); } static IIO_DEVICE_ATTR(bus_type, S_IRUGO, adt7316_show_bus_type, NULL, 0); static struct attribute *adt7316_attributes[] = { &iio_dev_attr_all_modes.dev_attr.attr, &iio_dev_attr_mode.dev_attr.attr, &iio_dev_attr_reset.dev_attr.attr, &iio_dev_attr_enabled.dev_attr.attr, &iio_dev_attr_ad_channel.dev_attr.attr, &iio_dev_attr_all_ad_channels.dev_attr.attr, &iio_dev_attr_disable_averaging.dev_attr.attr, &iio_dev_attr_enable_smbus_timeout.dev_attr.attr, &iio_dev_attr_powerdown.dev_attr.attr, &iio_dev_attr_fast_ad_clock.dev_attr.attr, &iio_dev_attr_da_high_resolution.dev_attr.attr, &iio_dev_attr_enable_proportion_DACA.dev_attr.attr, &iio_dev_attr_enable_proportion_DACB.dev_attr.attr, &iio_dev_attr_DAC_2Vref_channels_mask.dev_attr.attr, &iio_dev_attr_DAC_update_mode.dev_attr.attr, &iio_dev_attr_all_DAC_update_modes.dev_attr.attr, &iio_dev_attr_update_DAC.dev_attr.attr, &iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr, &iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr, &iio_dev_attr_DAC_internal_Vref.dev_attr.attr, &iio_dev_attr_VDD.dev_attr.attr, &iio_dev_attr_in_temp.dev_attr.attr, &iio_dev_attr_ex_temp.dev_attr.attr, &iio_dev_attr_in_temp_offset.dev_attr.attr, &iio_dev_attr_ex_temp_offset.dev_attr.attr, &iio_dev_attr_in_analog_temp_offset.dev_attr.attr, &iio_dev_attr_ex_analog_temp_offset.dev_attr.attr, &iio_dev_attr_DAC_A.dev_attr.attr, &iio_dev_attr_DAC_B.dev_attr.attr, &iio_dev_attr_DAC_C.dev_attr.attr, &iio_dev_attr_DAC_D.dev_attr.attr, &iio_dev_attr_device_id.dev_attr.attr, &iio_dev_attr_manufactorer_id.dev_attr.attr, &iio_dev_attr_device_rev.dev_attr.attr, &iio_dev_attr_bus_type.dev_attr.attr, NULL, }; static const struct attribute_group adt7316_attribute_group = { .attrs = adt7316_attributes, }; static struct attribute *adt7516_attributes[] = { &iio_dev_attr_all_modes.dev_attr.attr, &iio_dev_attr_mode.dev_attr.attr, 
&iio_dev_attr_select_ex_temp.dev_attr.attr, &iio_dev_attr_reset.dev_attr.attr, &iio_dev_attr_enabled.dev_attr.attr, &iio_dev_attr_ad_channel.dev_attr.attr, &iio_dev_attr_all_ad_channels.dev_attr.attr, &iio_dev_attr_disable_averaging.dev_attr.attr, &iio_dev_attr_enable_smbus_timeout.dev_attr.attr, &iio_dev_attr_powerdown.dev_attr.attr, &iio_dev_attr_fast_ad_clock.dev_attr.attr, &iio_dev_attr_AIN_internal_Vref.dev_attr.attr, &iio_dev_attr_da_high_resolution.dev_attr.attr, &iio_dev_attr_enable_proportion_DACA.dev_attr.attr, &iio_dev_attr_enable_proportion_DACB.dev_attr.attr, &iio_dev_attr_DAC_2Vref_channels_mask.dev_attr.attr, &iio_dev_attr_DAC_update_mode.dev_attr.attr, &iio_dev_attr_all_DAC_update_modes.dev_attr.attr, &iio_dev_attr_update_DAC.dev_attr.attr, &iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr, &iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr, &iio_dev_attr_DAC_internal_Vref.dev_attr.attr, &iio_dev_attr_VDD.dev_attr.attr, &iio_dev_attr_in_temp.dev_attr.attr, &iio_dev_attr_ex_temp_AIN1.dev_attr.attr, &iio_dev_attr_AIN2.dev_attr.attr, &iio_dev_attr_AIN3.dev_attr.attr, &iio_dev_attr_AIN4.dev_attr.attr, &iio_dev_attr_in_temp_offset.dev_attr.attr, &iio_dev_attr_ex_temp_offset.dev_attr.attr, &iio_dev_attr_in_analog_temp_offset.dev_attr.attr, &iio_dev_attr_ex_analog_temp_offset.dev_attr.attr, &iio_dev_attr_DAC_A.dev_attr.attr, &iio_dev_attr_DAC_B.dev_attr.attr, &iio_dev_attr_DAC_C.dev_attr.attr, &iio_dev_attr_DAC_D.dev_attr.attr, &iio_dev_attr_device_id.dev_attr.attr, &iio_dev_attr_manufactorer_id.dev_attr.attr, &iio_dev_attr_device_rev.dev_attr.attr, &iio_dev_attr_bus_type.dev_attr.attr, NULL, }; static const struct attribute_group adt7516_attribute_group = { .attrs = adt7516_attributes, }; static irqreturn_t adt7316_event_handler(int irq, void *private) { struct iio_dev *indio_dev = private; struct adt7316_chip_info *chip = iio_dev_get_devdata(indio_dev); u8 stat1, stat2; int ret; s64 time; ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT1, &stat1); if 
(!ret) { if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) stat1 &= 0x1F; time = iio_get_time_ns(); if (stat1 & (1 << 0)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), time); if (stat1 & (1 << 1)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), time); if (stat1 & (1 << 2)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_TEMP, 1, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), time); if (stat1 & (1 << 3)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_TEMP, 1, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), time); if (stat1 & (1 << 5)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_IN, 1, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), time); if (stat1 & (1 << 6)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_IN, 2, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), time); if (stat1 & (1 << 7)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_IN, 3, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), time); } ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT2, &stat2); if (!ret) { if (stat2 & ADT7316_INT_MASK2_VDD) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_IN, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), iio_get_time_ns()); } return IRQ_HANDLED; } /* * Show mask of enabled interrupts in Hex. */ static ssize_t adt7316_show_int_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "0x%x\n", chip->int_mask); } /* * Set 1 to the mask in Hex to enabled interrupts. 
*/
static ssize_t adt7316_set_int_mask(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct adt7316_chip_info *chip = dev_info->dev_data;
	unsigned long data;
	int ret;
	u8 mask;

	/* Hex bitmap of interrupts to enable; anything above VDD is invalid. */
	ret = strict_strtoul(buf, 16, &data);
	if (ret || data >= ADT7316_VDD_INT_MASK + 1)
		return -EINVAL;

	/* Register semantics are inverted: writing 1 disables the source. */
	if (data & ADT7316_VDD_INT_MASK)
		mask = 0; /* enable vdd int */
	else
		mask = ADT7316_INT_MASK2_VDD; /* disable vdd int */

	ret = chip->bus.write(chip->bus.client, ADT7316_INT_MASK2, mask);
	if (!ret) {
		chip->int_mask &= ~ADT7316_VDD_INT_MASK;
		chip->int_mask |= data & ADT7316_VDD_INT_MASK;
	}

	if (data & ADT7316_TEMP_AIN_INT_MASK) {
		if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX)
			/* mask in reg is opposite, set 1 to disable */
			mask = (~data) & ADT7316_TEMP_INT_MASK;
		else
			/* mask in reg is opposite, set 1 to disable */
			mask = (~data) & ADT7316_TEMP_AIN_INT_MASK;
	}
	/*
	 * NOTE(review): if no TEMP/AIN bits were requested, "mask" still
	 * holds the INT_MASK2 (VDD) value computed above and is written
	 * to INT_MASK1 here - looks unintended; confirm against the
	 * datasheet's INT_MASK1 layout.
	 */
	ret = chip->bus.write(chip->bus.client, ADT7316_INT_MASK1, mask);

	/*
	 * NOTE(review): this stores the *inverted* register value and
	 * clobbers the VDD bookkeeping done right after the INT_MASK2
	 * write above; chip->int_mask elsewhere holds the enabled-bits
	 * view (see probe).  Presumably should merge "data" instead -
	 * verify before changing.
	 */
	chip->int_mask = mask;

	return len;
}

/*
 * Show an A/D threshold register (address carried in the attribute).
 * Values are signed two's complement except for the ADT751x AIN1/AIN2
 * channels, which are plain unsigned.
 */
static inline ssize_t adt7316_show_ad_bound(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct adt7316_chip_info *chip = dev_info->dev_data;
	u8 val;
	int data;
	int ret;

	/* ADT731x parts only have the in/ex temperature bound registers. */
	if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX &&
		this_attr->address > ADT7316_EX_TEMP_LOW)
		return -EPERM;

	ret = chip->bus.read(chip->bus.client, this_attr->address, &val);
	if (ret)
		return -EIO;

	data = (int)val;

	/* Sign-extend unless this is an unsigned ADT751x AIN channel. */
	if (!((chip->id & ID_FAMILY_MASK) == ID_ADT75XX &&
		(chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)) {
		if (data & 0x80)
			data -= 256;
	}

	return sprintf(buf, "%d\n", data);
}

/*
 * Store an A/D threshold register; range depends on whether the channel
 * is signed (see adt7316_show_ad_bound).
 */
static inline ssize_t adt7316_set_ad_bound(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct adt7316_chip_info *chip = dev_info->dev_data;
	long
data; u8 val; int ret; if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX && this_attr->address > ADT7316_EX_TEMP_LOW) return -EPERM; ret = strict_strtol(buf, 10, &data); if (ret) return -EINVAL; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX && (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) { if (data > 255 || data < 0) return -EINVAL; } else { if (data > 127 || data < -128) return -EINVAL; if (data < 0) data += 256; } val = (u8)data; ret = chip->bus.write(chip->bus.client, this_attr->address, val); if (ret) return -EIO; return len; } static ssize_t adt7316_show_int_enabled(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_INT_EN)); } static ssize_t adt7316_set_int_enabled(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config1; int ret; config1 = chip->config1 & (~ADT7316_INT_EN); if (!memcmp(buf, "1", 1)) config1 |= ADT7316_INT_EN; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1); if (ret) return -EIO; chip->config1 = config1; return len; } static IIO_DEVICE_ATTR(int_mask, S_IRUGO | S_IWUSR, adt7316_show_int_mask, adt7316_set_int_mask, 0); static IIO_DEVICE_ATTR(in_temp_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_IN_TEMP_HIGH); static IIO_DEVICE_ATTR(in_temp_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_IN_TEMP_LOW); static IIO_DEVICE_ATTR(ex_temp_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_EX_TEMP_HIGH); static IIO_DEVICE_ATTR(ex_temp_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_EX_TEMP_LOW); /* NASTY duplication to be fixed */ static IIO_DEVICE_ATTR(ex_temp_ain1_high_value, S_IRUGO | 
S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_EX_TEMP_HIGH); static IIO_DEVICE_ATTR(ex_temp_ain1_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_EX_TEMP_LOW); static IIO_DEVICE_ATTR(ain2_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN2_HIGH); static IIO_DEVICE_ATTR(ain2_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN2_LOW); static IIO_DEVICE_ATTR(ain3_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN3_HIGH); static IIO_DEVICE_ATTR(ain3_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN3_LOW); static IIO_DEVICE_ATTR(ain4_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN4_HIGH); static IIO_DEVICE_ATTR(ain4_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN4_LOW); static IIO_DEVICE_ATTR(int_enabled, S_IRUGO | S_IWUSR, adt7316_show_int_enabled, adt7316_set_int_enabled, 0); static struct attribute *adt7316_event_attributes[] = { &iio_dev_attr_int_mask.dev_attr.attr, &iio_dev_attr_in_temp_high_value.dev_attr.attr, &iio_dev_attr_in_temp_low_value.dev_attr.attr, &iio_dev_attr_ex_temp_high_value.dev_attr.attr, &iio_dev_attr_ex_temp_low_value.dev_attr.attr, &iio_dev_attr_int_enabled.dev_attr.attr, NULL, }; static struct attribute_group adt7316_event_attribute_group = { .attrs = adt7316_event_attributes, }; static struct attribute *adt7516_event_attributes[] = { &iio_dev_attr_int_mask.dev_attr.attr, &iio_dev_attr_in_temp_high_value.dev_attr.attr, &iio_dev_attr_in_temp_low_value.dev_attr.attr, &iio_dev_attr_ex_temp_ain1_high_value.dev_attr.attr, &iio_dev_attr_ex_temp_ain1_low_value.dev_attr.attr, &iio_dev_attr_ain2_high_value.dev_attr.attr, &iio_dev_attr_ain2_low_value.dev_attr.attr, &iio_dev_attr_ain3_high_value.dev_attr.attr, &iio_dev_attr_ain3_low_value.dev_attr.attr, 
&iio_dev_attr_ain4_high_value.dev_attr.attr, &iio_dev_attr_ain4_low_value.dev_attr.attr, &iio_dev_attr_int_enabled.dev_attr.attr, NULL, }; static struct attribute_group adt7516_event_attribute_group = { .attrs = adt7516_event_attributes, }; #ifdef CONFIG_PM int adt7316_disable(struct device *dev) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return _adt7316_store_enabled(chip, 0); } EXPORT_SYMBOL(adt7316_disable); int adt7316_enable(struct device *dev) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return _adt7316_store_enabled(chip, 1); } EXPORT_SYMBOL(adt7316_enable); #endif static const struct iio_info adt7316_info = { .attrs = &adt7316_attribute_group, .num_interrupt_lines = 1, .event_attrs = &adt7316_event_attribute_group, .driver_module = THIS_MODULE, }; static const struct iio_info adt7516_info = { .attrs = &adt7516_attribute_group, .num_interrupt_lines = 1, .event_attrs = &adt7516_event_attribute_group, .driver_module = THIS_MODULE, }; /* * device probe and remove */ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name) { struct adt7316_chip_info *chip; unsigned short *adt7316_platform_data = dev->platform_data; int ret = 0; chip = kzalloc(sizeof(struct adt7316_chip_info), GFP_KERNEL); if (chip == NULL) return -ENOMEM; /* this is only used for device removal purposes */ dev_set_drvdata(dev, chip); chip->bus = *bus; if (name[4] == '3') chip->id = ID_ADT7316 + (name[6] - '6'); else if (name[4] == '5') chip->id = ID_ADT7516 + (name[6] - '6'); else return -ENODEV; chip->ldac_pin = adt7316_platform_data[1]; if (chip->ldac_pin) { chip->config3 |= ADT7316_DA_EN_VIA_DAC_LDCA; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) chip->config1 |= ADT7516_SEL_AIN3; } chip->int_mask = ADT7316_TEMP_INT_MASK | ADT7316_VDD_INT_MASK; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) chip->int_mask |= ADT7516_AIN_INT_MASK; 
chip->indio_dev = iio_allocate_device(0); if (chip->indio_dev == NULL) { ret = -ENOMEM; goto error_free_chip; } chip->indio_dev->dev.parent = dev; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) chip->indio_dev->info = &adt7516_info; else chip->indio_dev->info = &adt7316_info; chip->indio_dev->name = name; chip->indio_dev->dev_data = (void *)chip; chip->indio_dev->modes = INDIO_DIRECT_MODE; ret = iio_device_register(chip->indio_dev); if (ret) goto error_free_dev; if (chip->bus.irq > 0) { if (adt7316_platform_data[0]) chip->bus.irq_flags = adt7316_platform_data[0]; ret = request_threaded_irq(chip->bus.irq, NULL, &adt7316_event_handler, chip->bus.irq_flags | IRQF_ONESHOT, chip->indio_dev->name, chip->indio_dev); if (ret) goto error_unreg_dev; if (chip->bus.irq_flags & IRQF_TRIGGER_HIGH) chip->config1 |= ADT7316_INT_POLARITY; } ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, chip->config1); if (ret) { ret = -EIO; goto error_unreg_irq; } ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, chip->config3); if (ret) { ret = -EIO; goto error_unreg_irq; } dev_info(dev, "%s temperature sensor, ADC and DAC registered.\n", chip->indio_dev->name); return 0; error_unreg_irq: free_irq(chip->bus.irq, chip->indio_dev); error_unreg_dev: iio_device_unregister(chip->indio_dev); error_free_dev: iio_free_device(chip->indio_dev); error_free_chip: kfree(chip); return ret; } EXPORT_SYMBOL(adt7316_probe); int __devexit adt7316_remove(struct device *dev) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; dev_set_drvdata(dev, NULL); if (chip->bus.irq) free_irq(chip->bus.irq, chip->indio_dev); iio_device_unregister(chip->indio_dev); iio_free_device(chip->indio_dev); kfree(chip); return 0; } EXPORT_SYMBOL(adt7316_remove); MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); MODULE_DESCRIPTION("Analog Devices ADT7316/7/8 and ADT7516/7/9 digital" " temperature sensor, ADC and DAC driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
fards/Ainol_fire_kernel
drivers/staging/iio/addac/adt7316.c
2309
58746
/* * ADT7316 digital temperature sensor driver supporting ADT7316/7/8 ADT7516/7/9 * * * Copyright 2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/workqueue.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/i2c.h> #include <linux/rtc.h> #include "../iio.h" #include "../sysfs.h" #include "adt7316.h" /* * ADT7316 registers definition */ #define ADT7316_INT_STAT1 0x0 #define ADT7316_INT_STAT2 0x1 #define ADT7316_LSB_IN_TEMP_VDD 0x3 #define ADT7316_LSB_IN_TEMP_MASK 0x3 #define ADT7316_LSB_VDD_MASK 0xC #define ADT7316_LSB_VDD_OFFSET 2 #define ADT7316_LSB_EX_TEMP_AIN 0x4 #define ADT7316_LSB_EX_TEMP_MASK 0x3 #define ADT7516_LSB_AIN_SHIFT 2 #define ADT7316_AD_MSB_DATA_BASE 0x6 #define ADT7316_AD_MSB_DATA_REGS 3 #define ADT7516_AD_MSB_DATA_REGS 6 #define ADT7316_MSB_VDD 0x6 #define ADT7316_MSB_IN_TEMP 0x7 #define ADT7316_MSB_EX_TEMP 0x8 #define ADT7516_MSB_AIN1 0x8 #define ADT7516_MSB_AIN2 0x9 #define ADT7516_MSB_AIN3 0xA #define ADT7516_MSB_AIN4 0xB #define ADT7316_DA_DATA_BASE 0x10 #define ADT7316_DA_MSB_DATA_REGS 4 #define ADT7316_LSB_DAC_A 0x10 #define ADT7316_MSB_DAC_A 0x11 #define ADT7316_LSB_DAC_B 0x12 #define ADT7316_MSB_DAC_B 0x13 #define ADT7316_LSB_DAC_C 0x14 #define ADT7316_MSB_DAC_C 0x15 #define ADT7316_LSB_DAC_D 0x16 #define ADT7316_MSB_DAC_D 0x17 #define ADT7316_CONFIG1 0x18 #define ADT7316_CONFIG2 0x19 #define ADT7316_CONFIG3 0x1A #define ADT7316_LDAC_CONFIG 0x1B #define ADT7316_DAC_CONFIG 0x1C #define ADT7316_INT_MASK1 0x1D #define ADT7316_INT_MASK2 0x1E #define ADT7316_IN_TEMP_OFFSET 0x1F #define ADT7316_EX_TEMP_OFFSET 0x20 #define ADT7316_IN_ANALOG_TEMP_OFFSET 0x21 #define ADT7316_EX_ANALOG_TEMP_OFFSET 0x22 #define ADT7316_VDD_HIGH 0x23 #define ADT7316_VDD_LOW 0x24 #define ADT7316_IN_TEMP_HIGH 0x25 #define ADT7316_IN_TEMP_LOW 0x26 #define ADT7316_EX_TEMP_HIGH 0x27 #define 
ADT7316_EX_TEMP_LOW 0x28 #define ADT7516_AIN2_HIGH 0x2B #define ADT7516_AIN2_LOW 0x2C #define ADT7516_AIN3_HIGH 0x2D #define ADT7516_AIN3_LOW 0x2E #define ADT7516_AIN4_HIGH 0x2F #define ADT7516_AIN4_LOW 0x30 #define ADT7316_DEVICE_ID 0x4D #define ADT7316_MANUFACTURE_ID 0x4E #define ADT7316_DEVICE_REV 0x4F #define ADT7316_SPI_LOCK_STAT 0x7F /* * ADT7316 config1 */ #define ADT7316_EN 0x1 #define ADT7516_SEL_EX_TEMP 0x4 #define ADT7516_SEL_AIN1_2_EX_TEMP_MASK 0x6 #define ADT7516_SEL_AIN3 0x8 #define ADT7316_INT_EN 0x20 #define ADT7316_INT_POLARITY 0x40 #define ADT7316_PD 0x80 /* * ADT7316 config2 */ #define ADT7316_AD_SINGLE_CH_MASK 0x3 #define ADT7516_AD_SINGLE_CH_MASK 0x7 #define ADT7316_AD_SINGLE_CH_VDD 0 #define ADT7316_AD_SINGLE_CH_IN 1 #define ADT7316_AD_SINGLE_CH_EX 2 #define ADT7516_AD_SINGLE_CH_AIN1 2 #define ADT7516_AD_SINGLE_CH_AIN2 3 #define ADT7516_AD_SINGLE_CH_AIN3 4 #define ADT7516_AD_SINGLE_CH_AIN4 5 #define ADT7316_AD_SINGLE_CH_MODE 0x10 #define ADT7316_DISABLE_AVERAGING 0x20 #define ADT7316_EN_SMBUS_TIMEOUT 0x40 #define ADT7316_RESET 0x80 /* * ADT7316 config3 */ #define ADT7316_ADCLK_22_5 0x1 #define ADT7316_DA_HIGH_RESOLUTION 0x2 #define ADT7316_DA_EN_VIA_DAC_LDCA 0x4 #define ADT7516_AIN_IN_VREF 0x10 #define ADT7316_EN_IN_TEMP_PROP_DACA 0x20 #define ADT7316_EN_EX_TEMP_PROP_DACB 0x40 /* * ADT7316 DAC config */ #define ADT7316_DA_2VREF_CH_MASK 0xF #define ADT7316_DA_EN_MODE_MASK 0x30 #define ADT7316_DA_EN_MODE_SINGLE 0x00 #define ADT7316_DA_EN_MODE_AB_CD 0x10 #define ADT7316_DA_EN_MODE_ABCD 0x20 #define ADT7316_DA_EN_MODE_LDAC 0x30 #define ADT7316_VREF_BYPASS_DAC_AB 0x40 #define ADT7316_VREF_BYPASS_DAC_CD 0x80 /* * ADT7316 LDAC config */ #define ADT7316_LDAC_EN_DA_MASK 0xF #define ADT7316_DAC_IN_VREF 0x10 #define ADT7516_DAC_AB_IN_VREF 0x10 #define ADT7516_DAC_CD_IN_VREF 0x20 #define ADT7516_DAC_IN_VREF_OFFSET 4 #define ADT7516_DAC_IN_VREF_MASK 0x30 /* * ADT7316 INT_MASK2 */ #define ADT7316_INT_MASK2_VDD 0x10 /* * ADT7316 value masks */ #define 
ADT7316_VALUE_MASK 0xfff #define ADT7316_T_VALUE_SIGN 0x400 #define ADT7316_T_VALUE_FLOAT_OFFSET 2 #define ADT7316_T_VALUE_FLOAT_MASK 0x2 /* * Chip ID */ #define ID_ADT7316 0x1 #define ID_ADT7317 0x2 #define ID_ADT7318 0x3 #define ID_ADT7516 0x11 #define ID_ADT7517 0x12 #define ID_ADT7519 0x14 #define ID_FAMILY_MASK 0xF0 #define ID_ADT73XX 0x0 #define ID_ADT75XX 0x10 /* * struct adt7316_chip_info - chip specifc information */ struct adt7316_chip_info { struct iio_dev *indio_dev; struct adt7316_bus bus; u16 ldac_pin; u16 int_mask; /* 0x2f */ u8 config1; u8 config2; u8 config3; u8 dac_config; /* DAC config */ u8 ldac_config; /* LDAC config */ u8 dac_bits; /* 8, 10, 12 */ u8 id; /* chip id */ }; /* * Logic interrupt mask for user application to enable * interrupts. */ #define ADT7316_IN_TEMP_HIGH_INT_MASK 0x1 #define ADT7316_IN_TEMP_LOW_INT_MASK 0x2 #define ADT7316_EX_TEMP_HIGH_INT_MASK 0x4 #define ADT7316_EX_TEMP_LOW_INT_MASK 0x8 #define ADT7316_EX_TEMP_FAULT_INT_MASK 0x10 #define ADT7516_AIN1_INT_MASK 0x4 #define ADT7516_AIN2_INT_MASK 0x20 #define ADT7516_AIN3_INT_MASK 0x40 #define ADT7516_AIN4_INT_MASK 0x80 #define ADT7316_VDD_INT_MASK 0x100 #define ADT7316_TEMP_INT_MASK 0x1F #define ADT7516_AIN_INT_MASK 0xE0 #define ADT7316_TEMP_AIN_INT_MASK \ (ADT7316_TEMP_INT_MASK | ADT7316_TEMP_INT_MASK) /* * struct adt7316_chip_info - chip specifc information */ struct adt7316_limit_regs { u16 data_high; u16 data_low; }; static ssize_t adt7316_show_enabled(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_EN)); } static ssize_t _adt7316_store_enabled(struct adt7316_chip_info *chip, int enable) { u8 config1; int ret; if (enable) config1 = chip->config1 | ADT7316_EN; else config1 = chip->config1 & ~ADT7316_EN; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1); if (ret) return -EIO; chip->config1 
= config1; return ret; } static ssize_t adt7316_store_enabled(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; int enable; if (!memcmp(buf, "1", 1)) enable = 1; else enable = 0; if (_adt7316_store_enabled(chip, enable) < 0) return -EIO; else return len; } static IIO_DEVICE_ATTR(enabled, S_IRUGO | S_IWUSR, adt7316_show_enabled, adt7316_store_enabled, 0); static ssize_t adt7316_show_select_ex_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) return -EPERM; return sprintf(buf, "%d\n", !!(chip->config1 & ADT7516_SEL_EX_TEMP)); } static ssize_t adt7316_store_select_ex_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config1; int ret; if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) return -EPERM; config1 = chip->config1 & (~ADT7516_SEL_EX_TEMP); if (!memcmp(buf, "1", 1)) config1 |= ADT7516_SEL_EX_TEMP; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1); if (ret) return -EIO; chip->config1 = config1; return len; } static IIO_DEVICE_ATTR(select_ex_temp, S_IRUGO | S_IWUSR, adt7316_show_select_ex_temp, adt7316_store_select_ex_temp, 0); static ssize_t adt7316_show_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (chip->config2 & ADT7316_AD_SINGLE_CH_MODE) return sprintf(buf, "single_channel\n"); else return sprintf(buf, "round_robin\n"); } static ssize_t adt7316_store_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); 
struct adt7316_chip_info *chip = dev_info->dev_data; u8 config2; int ret; config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MODE); if (!memcmp(buf, "single_channel", 14)) config2 |= ADT7316_AD_SINGLE_CH_MODE; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2); if (ret) return -EIO; chip->config2 = config2; return len; } static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, adt7316_show_mode, adt7316_store_mode, 0); static ssize_t adt7316_show_all_modes(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "single_channel\nround_robin\n"); } static IIO_DEVICE_ATTR(all_modes, S_IRUGO, adt7316_show_all_modes, NULL, 0); static ssize_t adt7316_show_ad_channel(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE)) return -EPERM; switch (chip->config2 & ADT7516_AD_SINGLE_CH_MASK) { case ADT7316_AD_SINGLE_CH_VDD: return sprintf(buf, "0 - VDD\n"); case ADT7316_AD_SINGLE_CH_IN: return sprintf(buf, "1 - Internal Temperature\n"); case ADT7316_AD_SINGLE_CH_EX: if (((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) && (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) return sprintf(buf, "2 - AIN1\n"); else return sprintf(buf, "2 - External Temperature\n"); case ADT7516_AD_SINGLE_CH_AIN2: if ((chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) return sprintf(buf, "3 - AIN2\n"); else return sprintf(buf, "N/A\n"); case ADT7516_AD_SINGLE_CH_AIN3: if (chip->config1 & ADT7516_SEL_AIN3) return sprintf(buf, "4 - AIN3\n"); else return sprintf(buf, "N/A\n"); case ADT7516_AD_SINGLE_CH_AIN4: return sprintf(buf, "5 - AIN4\n"); default: return sprintf(buf, "N/A\n"); } } static ssize_t adt7316_store_ad_channel(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 
config2; unsigned long data = 0; int ret; if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE)) return -EPERM; ret = strict_strtoul(buf, 10, &data); if (ret) return -EINVAL; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) { if (data > 5) return -EINVAL; config2 = chip->config2 & (~ADT7516_AD_SINGLE_CH_MASK); } else { if (data > 2) return -EINVAL; config2 = chip->config2 & (~ADT7316_AD_SINGLE_CH_MASK); } config2 |= data; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2); if (ret) return -EIO; chip->config2 = config2; return len; } static IIO_DEVICE_ATTR(ad_channel, S_IRUGO | S_IWUSR, adt7316_show_ad_channel, adt7316_store_ad_channel, 0); static ssize_t adt7316_show_all_ad_channels(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (!(chip->config2 & ADT7316_AD_SINGLE_CH_MODE)) return -EPERM; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n" "2 - External Temperature or AIN1\n" "3 - AIN2\n4 - AIN3\n5 - AIN4\n"); else return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n" "2 - External Temperature\n"); } static IIO_DEVICE_ATTR(all_ad_channels, S_IRUGO, adt7316_show_all_ad_channels, NULL, 0); static ssize_t adt7316_show_disable_averaging(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config2 & ADT7316_DISABLE_AVERAGING)); } static ssize_t adt7316_store_disable_averaging(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config2; int ret; config2 = chip->config2 & (~ADT7316_DISABLE_AVERAGING); if (!memcmp(buf, "1", 1)) config2 |= ADT7316_DISABLE_AVERAGING; ret = chip->bus.write(chip->bus.client, 
ADT7316_CONFIG2, config2); if (ret) return -EIO; chip->config2 = config2; return len; } static IIO_DEVICE_ATTR(disable_averaging, S_IRUGO | S_IWUSR, adt7316_show_disable_averaging, adt7316_store_disable_averaging, 0); static ssize_t adt7316_show_enable_smbus_timeout(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config2 & ADT7316_EN_SMBUS_TIMEOUT)); } static ssize_t adt7316_store_enable_smbus_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config2; int ret; config2 = chip->config2 & (~ADT7316_EN_SMBUS_TIMEOUT); if (!memcmp(buf, "1", 1)) config2 |= ADT7316_EN_SMBUS_TIMEOUT; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2); if (ret) return -EIO; chip->config2 = config2; return len; } static IIO_DEVICE_ATTR(enable_smbus_timeout, S_IRUGO | S_IWUSR, adt7316_show_enable_smbus_timeout, adt7316_store_enable_smbus_timeout, 0); static ssize_t adt7316_store_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config2; int ret; config2 = chip->config2 | ADT7316_RESET; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2); if (ret) return -EIO; return len; } static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adt7316_store_reset, 0); static ssize_t adt7316_show_powerdown(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_PD)); } static ssize_t adt7316_store_powerdown(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct 
iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config1; int ret; config1 = chip->config1 & (~ADT7316_PD); if (!memcmp(buf, "1", 1)) config1 |= ADT7316_PD; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1); if (ret) return -EIO; chip->config1 = config1; return len; } static IIO_DEVICE_ATTR(powerdown, S_IRUGO | S_IWUSR, adt7316_show_powerdown, adt7316_store_powerdown, 0); static ssize_t adt7316_show_fast_ad_clock(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_ADCLK_22_5)); } static ssize_t adt7316_store_fast_ad_clock(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; config3 = chip->config3 & (~ADT7316_ADCLK_22_5); if (!memcmp(buf, "1", 1)) config3 |= ADT7316_ADCLK_22_5; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(fast_ad_clock, S_IRUGO | S_IWUSR, adt7316_show_fast_ad_clock, adt7316_store_fast_ad_clock, 0); static ssize_t adt7316_show_da_high_resolution(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) { if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516) return sprintf(buf, "1 (12 bits)\n"); else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517) return sprintf(buf, "1 (10 bits)\n"); } return sprintf(buf, "0 (8 bits)\n"); } static ssize_t adt7316_store_da_high_resolution(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct 
adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; chip->dac_bits = 8; if (!memcmp(buf, "1", 1)) { config3 = chip->config3 | ADT7316_DA_HIGH_RESOLUTION; if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516) chip->dac_bits = 12; else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517) chip->dac_bits = 10; } else config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION); ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(da_high_resolution, S_IRUGO | S_IWUSR, adt7316_show_da_high_resolution, adt7316_store_da_high_resolution, 0); static ssize_t adt7316_show_AIN_internal_Vref(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) return -EPERM; return sprintf(buf, "%d\n", !!(chip->config3 & ADT7516_AIN_IN_VREF)); } static ssize_t adt7316_store_AIN_internal_Vref(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) return -EPERM; if (memcmp(buf, "1", 1)) config3 = chip->config3 & (~ADT7516_AIN_IN_VREF); else config3 = chip->config3 | ADT7516_AIN_IN_VREF; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(AIN_internal_Vref, S_IRUGO | S_IWUSR, adt7316_show_AIN_internal_Vref, adt7316_store_AIN_internal_Vref, 0); static ssize_t adt7316_show_enable_prop_DACA(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)); } static 
ssize_t adt7316_store_enable_prop_DACA(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; config3 = chip->config3 & (~ADT7316_EN_IN_TEMP_PROP_DACA); if (!memcmp(buf, "1", 1)) config3 |= ADT7316_EN_IN_TEMP_PROP_DACA; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(enable_proportion_DACA, S_IRUGO | S_IWUSR, adt7316_show_enable_prop_DACA, adt7316_store_enable_prop_DACA, 0); static ssize_t adt7316_show_enable_prop_DACB(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB)); } static ssize_t adt7316_store_enable_prop_DACB(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config3; int ret; config3 = chip->config3 & (~ADT7316_EN_EX_TEMP_PROP_DACB); if (!memcmp(buf, "1", 1)) config3 |= ADT7316_EN_EX_TEMP_PROP_DACB; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3); if (ret) return -EIO; chip->config3 = config3; return len; } static IIO_DEVICE_ATTR(enable_proportion_DACB, S_IRUGO | S_IWUSR, adt7316_show_enable_prop_DACB, adt7316_store_enable_prop_DACB, 0); static ssize_t adt7316_show_DAC_2Vref_ch_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "0x%x\n", chip->dac_config & ADT7316_DA_2VREF_CH_MASK); } static ssize_t adt7316_store_DAC_2Vref_ch_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev 
*dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 dac_config; unsigned long data = 0; int ret; ret = strict_strtoul(buf, 16, &data); if (ret || data > ADT7316_DA_2VREF_CH_MASK) return -EINVAL; dac_config = chip->dac_config & (~ADT7316_DA_2VREF_CH_MASK); dac_config |= data; ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config); if (ret) return -EIO; chip->dac_config = dac_config; return len; } static IIO_DEVICE_ATTR(DAC_2Vref_channels_mask, S_IRUGO | S_IWUSR, adt7316_show_DAC_2Vref_ch_mask, adt7316_store_DAC_2Vref_ch_mask, 0); static ssize_t adt7316_show_DAC_update_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA)) return sprintf(buf, "manual\n"); else { switch (chip->dac_config & ADT7316_DA_EN_MODE_MASK) { case ADT7316_DA_EN_MODE_SINGLE: return sprintf(buf, "0 - auto at any MSB DAC writing\n"); case ADT7316_DA_EN_MODE_AB_CD: return sprintf(buf, "1 - auto at MSB DAC AB and CD writing\n"); case ADT7316_DA_EN_MODE_ABCD: return sprintf(buf, "2 - auto at MSB DAC ABCD writing\n"); default: /* ADT7316_DA_EN_MODE_LDAC */ return sprintf(buf, "3 - manual\n"); } } } static ssize_t adt7316_store_DAC_update_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 dac_config; unsigned long data; int ret; if (!(chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA)) return -EPERM; ret = strict_strtoul(buf, 10, &data); if (ret || data > ADT7316_DA_EN_MODE_MASK) return -EINVAL; dac_config = chip->dac_config & (~ADT7316_DA_EN_MODE_MASK); dac_config |= data; ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config); if (ret) return -EIO; chip->dac_config = dac_config; return len; } static IIO_DEVICE_ATTR(DAC_update_mode, 
S_IRUGO | S_IWUSR, adt7316_show_DAC_update_mode, adt7316_store_DAC_update_mode, 0); static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA) return sprintf(buf, "0 - auto at any MSB DAC writing\n" "1 - auto at MSB DAC AB and CD writing\n" "2 - auto at MSB DAC ABCD writing\n" "3 - manual\n"); else return sprintf(buf, "manual\n"); } static IIO_DEVICE_ATTR(all_DAC_update_modes, S_IRUGO, adt7316_show_all_DAC_update_modes, NULL, 0); static ssize_t adt7316_store_update_DAC(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 ldac_config; unsigned long data; int ret; if (chip->config3 & ADT7316_DA_EN_VIA_DAC_LDCA) { if ((chip->dac_config & ADT7316_DA_EN_MODE_MASK) != ADT7316_DA_EN_MODE_LDAC) return -EPERM; ret = strict_strtoul(buf, 16, &data); if (ret || data > ADT7316_LDAC_EN_DA_MASK) return -EINVAL; ldac_config = chip->ldac_config & (~ADT7316_LDAC_EN_DA_MASK); ldac_config |= data; ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG, ldac_config); if (ret) return -EIO; } else { gpio_set_value(chip->ldac_pin, 0); gpio_set_value(chip->ldac_pin, 1); } return len; } static IIO_DEVICE_ATTR(update_DAC, S_IRUGO | S_IWUSR, NULL, adt7316_store_update_DAC, 0); static ssize_t adt7316_show_DA_AB_Vref_bypass(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return -EPERM; return sprintf(buf, "%d\n", !!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_AB)); } static ssize_t adt7316_store_DA_AB_Vref_bypass(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { 
struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 dac_config; int ret; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return -EPERM; dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_AB); if (!memcmp(buf, "1", 1)) dac_config |= ADT7316_VREF_BYPASS_DAC_AB; ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config); if (ret) return -EIO; chip->dac_config = dac_config; return len; } static IIO_DEVICE_ATTR(DA_AB_Vref_bypass, S_IRUGO | S_IWUSR, adt7316_show_DA_AB_Vref_bypass, adt7316_store_DA_AB_Vref_bypass, 0); static ssize_t adt7316_show_DA_CD_Vref_bypass(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return -EPERM; return sprintf(buf, "%d\n", !!(chip->dac_config & ADT7316_VREF_BYPASS_DAC_CD)); } static ssize_t adt7316_store_DA_CD_Vref_bypass(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 dac_config; int ret; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return -EPERM; dac_config = chip->dac_config & (~ADT7316_VREF_BYPASS_DAC_CD); if (!memcmp(buf, "1", 1)) dac_config |= ADT7316_VREF_BYPASS_DAC_CD; ret = chip->bus.write(chip->bus.client, ADT7316_DAC_CONFIG, dac_config); if (ret) return -EIO; chip->dac_config = dac_config; return len; } static IIO_DEVICE_ATTR(DA_CD_Vref_bypass, S_IRUGO | S_IWUSR, adt7316_show_DA_CD_Vref_bypass, adt7316_store_DA_CD_Vref_bypass, 0); static ssize_t adt7316_show_DAC_internal_Vref(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return sprintf(buf, "0x%x\n", (chip->dac_config & ADT7516_DAC_IN_VREF_MASK) 
>> ADT7516_DAC_IN_VREF_OFFSET); else return sprintf(buf, "%d\n", !!(chip->dac_config & ADT7316_DAC_IN_VREF)); } static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 ldac_config; unsigned long data; int ret; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) { ret = strict_strtoul(buf, 16, &data); if (ret || data > 3) return -EINVAL; ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK); if (data & 0x1) ldac_config |= ADT7516_DAC_AB_IN_VREF; else if (data & 0x2) ldac_config |= ADT7516_DAC_CD_IN_VREF; } else { ret = strict_strtoul(buf, 16, &data); if (ret) return -EINVAL; ldac_config = chip->ldac_config & (~ADT7316_DAC_IN_VREF); if (data) ldac_config = chip->ldac_config | ADT7316_DAC_IN_VREF; } ret = chip->bus.write(chip->bus.client, ADT7316_LDAC_CONFIG, ldac_config); if (ret) return -EIO; chip->ldac_config = ldac_config; return len; } static IIO_DEVICE_ATTR(DAC_internal_Vref, S_IRUGO | S_IWUSR, adt7316_show_DAC_internal_Vref, adt7316_store_DAC_internal_Vref, 0); static ssize_t adt7316_show_ad(struct adt7316_chip_info *chip, int channel, char *buf) { u16 data; u8 msb, lsb; char sign = ' '; int ret; if ((chip->config2 & ADT7316_AD_SINGLE_CH_MODE) && channel != (chip->config2 & ADT7516_AD_SINGLE_CH_MASK)) return -EPERM; switch (channel) { case ADT7316_AD_SINGLE_CH_IN: ret = chip->bus.read(chip->bus.client, ADT7316_LSB_IN_TEMP_VDD, &lsb); if (ret) return -EIO; ret = chip->bus.read(chip->bus.client, ADT7316_AD_MSB_DATA_BASE + channel, &msb); if (ret) return -EIO; data = msb << ADT7316_T_VALUE_FLOAT_OFFSET; data |= lsb & ADT7316_LSB_IN_TEMP_MASK; break; case ADT7316_AD_SINGLE_CH_VDD: ret = chip->bus.read(chip->bus.client, ADT7316_LSB_IN_TEMP_VDD, &lsb); if (ret) return -EIO; ret = chip->bus.read(chip->bus.client, ADT7316_AD_MSB_DATA_BASE + channel, &msb); if (ret) return -EIO; data = msb << 
ADT7316_T_VALUE_FLOAT_OFFSET; data |= (lsb & ADT7316_LSB_VDD_MASK) >> ADT7316_LSB_VDD_OFFSET; return sprintf(buf, "%d\n", data); default: /* ex_temp and ain */ ret = chip->bus.read(chip->bus.client, ADT7316_LSB_EX_TEMP_AIN, &lsb); if (ret) return -EIO; ret = chip->bus.read(chip->bus.client, ADT7316_AD_MSB_DATA_BASE + channel, &msb); if (ret) return -EIO; data = msb << ADT7316_T_VALUE_FLOAT_OFFSET; data |= lsb & (ADT7316_LSB_EX_TEMP_MASK << (ADT7516_LSB_AIN_SHIFT * (channel - (ADT7316_MSB_EX_TEMP - ADT7316_AD_MSB_DATA_BASE)))); if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) return sprintf(buf, "%d\n", data); else break; } if (data & ADT7316_T_VALUE_SIGN) { /* convert supplement to positive value */ data = (ADT7316_T_VALUE_SIGN << 1) - data; sign = '-'; } return sprintf(buf, "%c%d.%.2d\n", sign, (data >> ADT7316_T_VALUE_FLOAT_OFFSET), (data & ADT7316_T_VALUE_FLOAT_MASK) * 25); } static ssize_t adt7316_show_VDD(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_VDD, buf); } static IIO_DEVICE_ATTR(VDD, S_IRUGO, adt7316_show_VDD, NULL, 0); static ssize_t adt7316_show_in_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_IN, buf); } static IIO_DEVICE_ATTR(in_temp, S_IRUGO, adt7316_show_in_temp, NULL, 0); static ssize_t adt7316_show_ex_temp_AIN1(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7316_AD_SINGLE_CH_EX, buf); } static IIO_DEVICE_ATTR(ex_temp_AIN1, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0); static IIO_DEVICE_ATTR(ex_temp, S_IRUGO, adt7316_show_ex_temp_AIN1, NULL, 0); static ssize_t 
adt7316_show_AIN2(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN2, buf); } static IIO_DEVICE_ATTR(AIN2, S_IRUGO, adt7316_show_AIN2, NULL, 0); static ssize_t adt7316_show_AIN3(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN3, buf); } static IIO_DEVICE_ATTR(AIN3, S_IRUGO, adt7316_show_AIN3, NULL, 0); static ssize_t adt7316_show_AIN4(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_ad(chip, ADT7516_AD_SINGLE_CH_AIN4, buf); } static IIO_DEVICE_ATTR(AIN4, S_IRUGO, adt7316_show_AIN4, NULL, 0); static ssize_t adt7316_show_temp_offset(struct adt7316_chip_info *chip, int offset_addr, char *buf) { int data; u8 val; int ret; ret = chip->bus.read(chip->bus.client, offset_addr, &val); if (ret) return -EIO; data = (int)val; if (val & 0x80) data -= 256; return sprintf(buf, "%d\n", data); } static ssize_t adt7316_store_temp_offset(struct adt7316_chip_info *chip, int offset_addr, const char *buf, size_t len) { long data; u8 val; int ret; ret = strict_strtol(buf, 10, &data); if (ret || data > 127 || data < -128) return -EINVAL; if (data < 0) data += 256; val = (u8)data; ret = chip->bus.write(chip->bus.client, offset_addr, val); if (ret) return -EIO; return len; } static ssize_t adt7316_show_in_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf); } static ssize_t adt7316_store_in_temp_offset(struct device *dev, struct 
device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_temp_offset(chip, ADT7316_IN_TEMP_OFFSET, buf, len); } static IIO_DEVICE_ATTR(in_temp_offset, S_IRUGO | S_IWUSR, adt7316_show_in_temp_offset, adt7316_store_in_temp_offset, 0); static ssize_t adt7316_show_ex_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf); } static ssize_t adt7316_store_ex_temp_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_temp_offset(chip, ADT7316_EX_TEMP_OFFSET, buf, len); } static IIO_DEVICE_ATTR(ex_temp_offset, S_IRUGO | S_IWUSR, adt7316_show_ex_temp_offset, adt7316_store_ex_temp_offset, 0); static ssize_t adt7316_show_in_analog_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_temp_offset(chip, ADT7316_IN_ANALOG_TEMP_OFFSET, buf); } static ssize_t adt7316_store_in_analog_temp_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_temp_offset(chip, ADT7316_IN_ANALOG_TEMP_OFFSET, buf, len); } static IIO_DEVICE_ATTR(in_analog_temp_offset, S_IRUGO | S_IWUSR, adt7316_show_in_analog_temp_offset, adt7316_store_in_analog_temp_offset, 0); static ssize_t adt7316_show_ex_analog_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info 
*chip = dev_info->dev_data; return adt7316_show_temp_offset(chip, ADT7316_EX_ANALOG_TEMP_OFFSET, buf); } static ssize_t adt7316_store_ex_analog_temp_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_temp_offset(chip, ADT7316_EX_ANALOG_TEMP_OFFSET, buf, len); } static IIO_DEVICE_ATTR(ex_analog_temp_offset, S_IRUGO | S_IWUSR, adt7316_show_ex_analog_temp_offset, adt7316_store_ex_analog_temp_offset, 0); static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip, int channel, char *buf) { u16 data; u8 msb, lsb, offset; int ret; if (channel >= ADT7316_DA_MSB_DATA_REGS || (channel == 0 && (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) || (channel == 1 && (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB))) return -EPERM; offset = chip->dac_bits - 8; if (chip->dac_bits > 8) { ret = chip->bus.read(chip->bus.client, ADT7316_DA_DATA_BASE + channel * 2, &lsb); if (ret) return -EIO; } ret = chip->bus.read(chip->bus.client, ADT7316_DA_DATA_BASE + 1 + channel * 2, &msb); if (ret) return -EIO; data = (msb << offset) + (lsb & ((1 << offset) - 1)); return sprintf(buf, "%d\n", data); } static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip, int channel, const char *buf, size_t len) { u8 msb, lsb, offset; unsigned long data; int ret; if (channel >= ADT7316_DA_MSB_DATA_REGS || (channel == 0 && (chip->config3 & ADT7316_EN_IN_TEMP_PROP_DACA)) || (channel == 1 && (chip->config3 & ADT7316_EN_EX_TEMP_PROP_DACB))) return -EPERM; offset = chip->dac_bits - 8; ret = strict_strtoul(buf, 10, &data); if (ret || data >= (1 << chip->dac_bits)) return -EINVAL; if (chip->dac_bits > 8) { lsb = data & (1 << offset); ret = chip->bus.write(chip->bus.client, ADT7316_DA_DATA_BASE + channel * 2, lsb); if (ret) return -EIO; } msb = data >> offset; ret = chip->bus.write(chip->bus.client, ADT7316_DA_DATA_BASE + 1 + channel * 2, msb); if (ret) 
return -EIO; return len; } static ssize_t adt7316_show_DAC_A(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_DAC(chip, 0, buf); } static ssize_t adt7316_store_DAC_A(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_DAC(chip, 0, buf, len); } static IIO_DEVICE_ATTR(DAC_A, S_IRUGO | S_IWUSR, adt7316_show_DAC_A, adt7316_store_DAC_A, 0); static ssize_t adt7316_show_DAC_B(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_DAC(chip, 1, buf); } static ssize_t adt7316_store_DAC_B(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_DAC(chip, 1, buf, len); } static IIO_DEVICE_ATTR(DAC_B, S_IRUGO | S_IWUSR, adt7316_show_DAC_B, adt7316_store_DAC_B, 0); static ssize_t adt7316_show_DAC_C(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_DAC(chip, 2, buf); } static ssize_t adt7316_store_DAC_C(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_DAC(chip, 2, buf, len); } static IIO_DEVICE_ATTR(DAC_C, S_IRUGO | S_IWUSR, adt7316_show_DAC_C, adt7316_store_DAC_C, 0); static ssize_t adt7316_show_DAC_D(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct 
adt7316_chip_info *chip = dev_info->dev_data; return adt7316_show_DAC(chip, 3, buf); } static ssize_t adt7316_store_DAC_D(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return adt7316_store_DAC(chip, 3, buf, len); } static IIO_DEVICE_ATTR(DAC_D, S_IRUGO | S_IWUSR, adt7316_show_DAC_D, adt7316_store_DAC_D, 0); static ssize_t adt7316_show_device_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 id; int ret; ret = chip->bus.read(chip->bus.client, ADT7316_DEVICE_ID, &id); if (ret) return -EIO; return sprintf(buf, "%d\n", id); } static IIO_DEVICE_ATTR(device_id, S_IRUGO, adt7316_show_device_id, NULL, 0); static ssize_t adt7316_show_manufactorer_id(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 id; int ret; ret = chip->bus.read(chip->bus.client, ADT7316_MANUFACTURE_ID, &id); if (ret) return -EIO; return sprintf(buf, "%d\n", id); } static IIO_DEVICE_ATTR(manufactorer_id, S_IRUGO, adt7316_show_manufactorer_id, NULL, 0); static ssize_t adt7316_show_device_rev(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 rev; int ret; ret = chip->bus.read(chip->bus.client, ADT7316_DEVICE_REV, &rev); if (ret) return -EIO; return sprintf(buf, "%d\n", rev); } static IIO_DEVICE_ATTR(device_rev, S_IRUGO, adt7316_show_device_rev, NULL, 0); static ssize_t adt7316_show_bus_type(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 stat; int ret; ret = chip->bus.read(chip->bus.client, 
ADT7316_SPI_LOCK_STAT, &stat); if (ret) return -EIO; if (stat) return sprintf(buf, "spi\n"); else return sprintf(buf, "i2c\n"); } static IIO_DEVICE_ATTR(bus_type, S_IRUGO, adt7316_show_bus_type, NULL, 0); static struct attribute *adt7316_attributes[] = { &iio_dev_attr_all_modes.dev_attr.attr, &iio_dev_attr_mode.dev_attr.attr, &iio_dev_attr_reset.dev_attr.attr, &iio_dev_attr_enabled.dev_attr.attr, &iio_dev_attr_ad_channel.dev_attr.attr, &iio_dev_attr_all_ad_channels.dev_attr.attr, &iio_dev_attr_disable_averaging.dev_attr.attr, &iio_dev_attr_enable_smbus_timeout.dev_attr.attr, &iio_dev_attr_powerdown.dev_attr.attr, &iio_dev_attr_fast_ad_clock.dev_attr.attr, &iio_dev_attr_da_high_resolution.dev_attr.attr, &iio_dev_attr_enable_proportion_DACA.dev_attr.attr, &iio_dev_attr_enable_proportion_DACB.dev_attr.attr, &iio_dev_attr_DAC_2Vref_channels_mask.dev_attr.attr, &iio_dev_attr_DAC_update_mode.dev_attr.attr, &iio_dev_attr_all_DAC_update_modes.dev_attr.attr, &iio_dev_attr_update_DAC.dev_attr.attr, &iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr, &iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr, &iio_dev_attr_DAC_internal_Vref.dev_attr.attr, &iio_dev_attr_VDD.dev_attr.attr, &iio_dev_attr_in_temp.dev_attr.attr, &iio_dev_attr_ex_temp.dev_attr.attr, &iio_dev_attr_in_temp_offset.dev_attr.attr, &iio_dev_attr_ex_temp_offset.dev_attr.attr, &iio_dev_attr_in_analog_temp_offset.dev_attr.attr, &iio_dev_attr_ex_analog_temp_offset.dev_attr.attr, &iio_dev_attr_DAC_A.dev_attr.attr, &iio_dev_attr_DAC_B.dev_attr.attr, &iio_dev_attr_DAC_C.dev_attr.attr, &iio_dev_attr_DAC_D.dev_attr.attr, &iio_dev_attr_device_id.dev_attr.attr, &iio_dev_attr_manufactorer_id.dev_attr.attr, &iio_dev_attr_device_rev.dev_attr.attr, &iio_dev_attr_bus_type.dev_attr.attr, NULL, }; static const struct attribute_group adt7316_attribute_group = { .attrs = adt7316_attributes, }; static struct attribute *adt7516_attributes[] = { &iio_dev_attr_all_modes.dev_attr.attr, &iio_dev_attr_mode.dev_attr.attr, 
&iio_dev_attr_select_ex_temp.dev_attr.attr, &iio_dev_attr_reset.dev_attr.attr, &iio_dev_attr_enabled.dev_attr.attr, &iio_dev_attr_ad_channel.dev_attr.attr, &iio_dev_attr_all_ad_channels.dev_attr.attr, &iio_dev_attr_disable_averaging.dev_attr.attr, &iio_dev_attr_enable_smbus_timeout.dev_attr.attr, &iio_dev_attr_powerdown.dev_attr.attr, &iio_dev_attr_fast_ad_clock.dev_attr.attr, &iio_dev_attr_AIN_internal_Vref.dev_attr.attr, &iio_dev_attr_da_high_resolution.dev_attr.attr, &iio_dev_attr_enable_proportion_DACA.dev_attr.attr, &iio_dev_attr_enable_proportion_DACB.dev_attr.attr, &iio_dev_attr_DAC_2Vref_channels_mask.dev_attr.attr, &iio_dev_attr_DAC_update_mode.dev_attr.attr, &iio_dev_attr_all_DAC_update_modes.dev_attr.attr, &iio_dev_attr_update_DAC.dev_attr.attr, &iio_dev_attr_DA_AB_Vref_bypass.dev_attr.attr, &iio_dev_attr_DA_CD_Vref_bypass.dev_attr.attr, &iio_dev_attr_DAC_internal_Vref.dev_attr.attr, &iio_dev_attr_VDD.dev_attr.attr, &iio_dev_attr_in_temp.dev_attr.attr, &iio_dev_attr_ex_temp_AIN1.dev_attr.attr, &iio_dev_attr_AIN2.dev_attr.attr, &iio_dev_attr_AIN3.dev_attr.attr, &iio_dev_attr_AIN4.dev_attr.attr, &iio_dev_attr_in_temp_offset.dev_attr.attr, &iio_dev_attr_ex_temp_offset.dev_attr.attr, &iio_dev_attr_in_analog_temp_offset.dev_attr.attr, &iio_dev_attr_ex_analog_temp_offset.dev_attr.attr, &iio_dev_attr_DAC_A.dev_attr.attr, &iio_dev_attr_DAC_B.dev_attr.attr, &iio_dev_attr_DAC_C.dev_attr.attr, &iio_dev_attr_DAC_D.dev_attr.attr, &iio_dev_attr_device_id.dev_attr.attr, &iio_dev_attr_manufactorer_id.dev_attr.attr, &iio_dev_attr_device_rev.dev_attr.attr, &iio_dev_attr_bus_type.dev_attr.attr, NULL, }; static const struct attribute_group adt7516_attribute_group = { .attrs = adt7516_attributes, }; static irqreturn_t adt7316_event_handler(int irq, void *private) { struct iio_dev *indio_dev = private; struct adt7316_chip_info *chip = iio_dev_get_devdata(indio_dev); u8 stat1, stat2; int ret; s64 time; ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT1, &stat1); if 
(!ret) { if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX) stat1 &= 0x1F; time = iio_get_time_ns(); if (stat1 & (1 << 0)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), time); if (stat1 & (1 << 1)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), time); if (stat1 & (1 << 2)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_TEMP, 1, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), time); if (stat1 & (1 << 3)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_TEMP, 1, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING), time); if (stat1 & (1 << 5)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_IN, 1, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), time); if (stat1 & (1 << 6)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_IN, 2, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), time); if (stat1 & (1 << 7)) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_IN, 3, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), time); } ret = chip->bus.read(chip->bus.client, ADT7316_INT_STAT2, &stat2); if (!ret) { if (stat2 & ADT7316_INT_MASK2_VDD) iio_push_event(chip->indio_dev, 0, IIO_UNMOD_EVENT_CODE(IIO_IN, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING), iio_get_time_ns()); } return IRQ_HANDLED; } /* * Show mask of enabled interrupts in Hex. */ static ssize_t adt7316_show_int_mask(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "0x%x\n", chip->int_mask); } /* * Set 1 to the mask in Hex to enabled interrupts. 
*/ static ssize_t adt7316_set_int_mask(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; unsigned long data; int ret; u8 mask; ret = strict_strtoul(buf, 16, &data); if (ret || data >= ADT7316_VDD_INT_MASK + 1) return -EINVAL; if (data & ADT7316_VDD_INT_MASK) mask = 0; /* enable vdd int */ else mask = ADT7316_INT_MASK2_VDD; /* disable vdd int */ ret = chip->bus.write(chip->bus.client, ADT7316_INT_MASK2, mask); if (!ret) { chip->int_mask &= ~ADT7316_VDD_INT_MASK; chip->int_mask |= data & ADT7316_VDD_INT_MASK; } if (data & ADT7316_TEMP_AIN_INT_MASK) { if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX) /* mask in reg is opposite, set 1 to disable */ mask = (~data) & ADT7316_TEMP_INT_MASK; else /* mask in reg is opposite, set 1 to disable */ mask = (~data) & ADT7316_TEMP_AIN_INT_MASK; } ret = chip->bus.write(chip->bus.client, ADT7316_INT_MASK1, mask); chip->int_mask = mask; return len; } static inline ssize_t adt7316_show_ad_bound(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 val; int data; int ret; if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX && this_attr->address > ADT7316_EX_TEMP_LOW) return -EPERM; ret = chip->bus.read(chip->bus.client, this_attr->address, &val); if (ret) return -EIO; data = (int)val; if (!((chip->id & ID_FAMILY_MASK) == ID_ADT75XX && (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0)) { if (data & 0x80) data -= 256; } return sprintf(buf, "%d\n", data); } static inline ssize_t adt7316_set_ad_bound(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; long 
data; u8 val; int ret; if ((chip->id & ID_FAMILY_MASK) == ID_ADT73XX && this_attr->address > ADT7316_EX_TEMP_LOW) return -EPERM; ret = strict_strtol(buf, 10, &data); if (ret) return -EINVAL; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX && (chip->config1 & ADT7516_SEL_AIN1_2_EX_TEMP_MASK) == 0) { if (data > 255 || data < 0) return -EINVAL; } else { if (data > 127 || data < -128) return -EINVAL; if (data < 0) data += 256; } val = (u8)data; ret = chip->bus.write(chip->bus.client, this_attr->address, val); if (ret) return -EIO; return len; } static ssize_t adt7316_show_int_enabled(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return sprintf(buf, "%d\n", !!(chip->config1 & ADT7316_INT_EN)); } static ssize_t adt7316_set_int_enabled(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; u8 config1; int ret; config1 = chip->config1 & (~ADT7316_INT_EN); if (!memcmp(buf, "1", 1)) config1 |= ADT7316_INT_EN; ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, config1); if (ret) return -EIO; chip->config1 = config1; return len; } static IIO_DEVICE_ATTR(int_mask, S_IRUGO | S_IWUSR, adt7316_show_int_mask, adt7316_set_int_mask, 0); static IIO_DEVICE_ATTR(in_temp_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_IN_TEMP_HIGH); static IIO_DEVICE_ATTR(in_temp_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_IN_TEMP_LOW); static IIO_DEVICE_ATTR(ex_temp_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_EX_TEMP_HIGH); static IIO_DEVICE_ATTR(ex_temp_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_EX_TEMP_LOW); /* NASTY duplication to be fixed */ static IIO_DEVICE_ATTR(ex_temp_ain1_high_value, S_IRUGO | 
S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_EX_TEMP_HIGH); static IIO_DEVICE_ATTR(ex_temp_ain1_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7316_EX_TEMP_LOW); static IIO_DEVICE_ATTR(ain2_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN2_HIGH); static IIO_DEVICE_ATTR(ain2_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN2_LOW); static IIO_DEVICE_ATTR(ain3_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN3_HIGH); static IIO_DEVICE_ATTR(ain3_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN3_LOW); static IIO_DEVICE_ATTR(ain4_high_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN4_HIGH); static IIO_DEVICE_ATTR(ain4_low_value, S_IRUGO | S_IWUSR, adt7316_show_ad_bound, adt7316_set_ad_bound, ADT7516_AIN4_LOW); static IIO_DEVICE_ATTR(int_enabled, S_IRUGO | S_IWUSR, adt7316_show_int_enabled, adt7316_set_int_enabled, 0); static struct attribute *adt7316_event_attributes[] = { &iio_dev_attr_int_mask.dev_attr.attr, &iio_dev_attr_in_temp_high_value.dev_attr.attr, &iio_dev_attr_in_temp_low_value.dev_attr.attr, &iio_dev_attr_ex_temp_high_value.dev_attr.attr, &iio_dev_attr_ex_temp_low_value.dev_attr.attr, &iio_dev_attr_int_enabled.dev_attr.attr, NULL, }; static struct attribute_group adt7316_event_attribute_group = { .attrs = adt7316_event_attributes, }; static struct attribute *adt7516_event_attributes[] = { &iio_dev_attr_int_mask.dev_attr.attr, &iio_dev_attr_in_temp_high_value.dev_attr.attr, &iio_dev_attr_in_temp_low_value.dev_attr.attr, &iio_dev_attr_ex_temp_ain1_high_value.dev_attr.attr, &iio_dev_attr_ex_temp_ain1_low_value.dev_attr.attr, &iio_dev_attr_ain2_high_value.dev_attr.attr, &iio_dev_attr_ain2_low_value.dev_attr.attr, &iio_dev_attr_ain3_high_value.dev_attr.attr, &iio_dev_attr_ain3_low_value.dev_attr.attr, 
&iio_dev_attr_ain4_high_value.dev_attr.attr, &iio_dev_attr_ain4_low_value.dev_attr.attr, &iio_dev_attr_int_enabled.dev_attr.attr, NULL, }; static struct attribute_group adt7516_event_attribute_group = { .attrs = adt7516_event_attributes, }; #ifdef CONFIG_PM int adt7316_disable(struct device *dev) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return _adt7316_store_enabled(chip, 0); } EXPORT_SYMBOL(adt7316_disable); int adt7316_enable(struct device *dev) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; return _adt7316_store_enabled(chip, 1); } EXPORT_SYMBOL(adt7316_enable); #endif static const struct iio_info adt7316_info = { .attrs = &adt7316_attribute_group, .num_interrupt_lines = 1, .event_attrs = &adt7316_event_attribute_group, .driver_module = THIS_MODULE, }; static const struct iio_info adt7516_info = { .attrs = &adt7516_attribute_group, .num_interrupt_lines = 1, .event_attrs = &adt7516_event_attribute_group, .driver_module = THIS_MODULE, }; /* * device probe and remove */ int __devinit adt7316_probe(struct device *dev, struct adt7316_bus *bus, const char *name) { struct adt7316_chip_info *chip; unsigned short *adt7316_platform_data = dev->platform_data; int ret = 0; chip = kzalloc(sizeof(struct adt7316_chip_info), GFP_KERNEL); if (chip == NULL) return -ENOMEM; /* this is only used for device removal purposes */ dev_set_drvdata(dev, chip); chip->bus = *bus; if (name[4] == '3') chip->id = ID_ADT7316 + (name[6] - '6'); else if (name[4] == '5') chip->id = ID_ADT7516 + (name[6] - '6'); else return -ENODEV; chip->ldac_pin = adt7316_platform_data[1]; if (chip->ldac_pin) { chip->config3 |= ADT7316_DA_EN_VIA_DAC_LDCA; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) chip->config1 |= ADT7516_SEL_AIN3; } chip->int_mask = ADT7316_TEMP_INT_MASK | ADT7316_VDD_INT_MASK; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) chip->int_mask |= ADT7516_AIN_INT_MASK; 
chip->indio_dev = iio_allocate_device(0); if (chip->indio_dev == NULL) { ret = -ENOMEM; goto error_free_chip; } chip->indio_dev->dev.parent = dev; if ((chip->id & ID_FAMILY_MASK) == ID_ADT75XX) chip->indio_dev->info = &adt7516_info; else chip->indio_dev->info = &adt7316_info; chip->indio_dev->name = name; chip->indio_dev->dev_data = (void *)chip; chip->indio_dev->modes = INDIO_DIRECT_MODE; ret = iio_device_register(chip->indio_dev); if (ret) goto error_free_dev; if (chip->bus.irq > 0) { if (adt7316_platform_data[0]) chip->bus.irq_flags = adt7316_platform_data[0]; ret = request_threaded_irq(chip->bus.irq, NULL, &adt7316_event_handler, chip->bus.irq_flags | IRQF_ONESHOT, chip->indio_dev->name, chip->indio_dev); if (ret) goto error_unreg_dev; if (chip->bus.irq_flags & IRQF_TRIGGER_HIGH) chip->config1 |= ADT7316_INT_POLARITY; } ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG1, chip->config1); if (ret) { ret = -EIO; goto error_unreg_irq; } ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, chip->config3); if (ret) { ret = -EIO; goto error_unreg_irq; } dev_info(dev, "%s temperature sensor, ADC and DAC registered.\n", chip->indio_dev->name); return 0; error_unreg_irq: free_irq(chip->bus.irq, chip->indio_dev); error_unreg_dev: iio_device_unregister(chip->indio_dev); error_free_dev: iio_free_device(chip->indio_dev); error_free_chip: kfree(chip); return ret; } EXPORT_SYMBOL(adt7316_probe); int __devexit adt7316_remove(struct device *dev) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct adt7316_chip_info *chip = dev_info->dev_data; dev_set_drvdata(dev, NULL); if (chip->bus.irq) free_irq(chip->bus.irq, chip->indio_dev); iio_device_unregister(chip->indio_dev); iio_free_device(chip->indio_dev); kfree(chip); return 0; } EXPORT_SYMBOL(adt7316_remove); MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>"); MODULE_DESCRIPTION("Analog Devices ADT7316/7/8 and ADT7516/7/9 digital" " temperature sensor, ADC and DAC driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
AndroidSymmetry/Old_Sparky
drivers/clk/clk-axi-clkgen.c
2565
8579
/* * AXI clkgen driver * * Copyright 2012-2013 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> * * Licensed under the GPL-2. * */ #include <linux/platform_device.h> #include <linux/clk-provider.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/of.h> #include <linux/module.h> #include <linux/err.h> #define AXI_CLKGEN_REG_UPDATE_ENABLE 0x04 #define AXI_CLKGEN_REG_CLK_OUT1 0x08 #define AXI_CLKGEN_REG_CLK_OUT2 0x0c #define AXI_CLKGEN_REG_CLK_DIV 0x10 #define AXI_CLKGEN_REG_CLK_FB1 0x14 #define AXI_CLKGEN_REG_CLK_FB2 0x18 #define AXI_CLKGEN_REG_LOCK1 0x1c #define AXI_CLKGEN_REG_LOCK2 0x20 #define AXI_CLKGEN_REG_LOCK3 0x24 #define AXI_CLKGEN_REG_FILTER1 0x28 #define AXI_CLKGEN_REG_FILTER2 0x2c struct axi_clkgen { void __iomem *base; struct clk_hw clk_hw; }; static uint32_t axi_clkgen_lookup_filter(unsigned int m) { switch (m) { case 0: return 0x01001990; case 1: return 0x01001190; case 2: return 0x01009890; case 3: return 0x01001890; case 4: return 0x01008890; case 5 ... 8: return 0x01009090; case 9 ... 11: return 0x01000890; case 12: return 0x08009090; case 13 ... 22: return 0x01001090; case 23 ... 36: return 0x01008090; case 37 ... 
46: return 0x08001090; default: return 0x08008090; } } static const uint32_t axi_clkgen_lock_table[] = { 0x060603e8, 0x060603e8, 0x080803e8, 0x0b0b03e8, 0x0e0e03e8, 0x111103e8, 0x131303e8, 0x161603e8, 0x191903e8, 0x1c1c03e8, 0x1f1f0384, 0x1f1f0339, 0x1f1f02ee, 0x1f1f02bc, 0x1f1f028a, 0x1f1f0271, 0x1f1f023f, 0x1f1f0226, 0x1f1f020d, 0x1f1f01f4, 0x1f1f01db, 0x1f1f01c2, 0x1f1f01a9, 0x1f1f0190, 0x1f1f0190, 0x1f1f0177, 0x1f1f015e, 0x1f1f015e, 0x1f1f0145, 0x1f1f0145, 0x1f1f012c, 0x1f1f012c, 0x1f1f012c, 0x1f1f0113, 0x1f1f0113, 0x1f1f0113, }; static uint32_t axi_clkgen_lookup_lock(unsigned int m) { if (m < ARRAY_SIZE(axi_clkgen_lock_table)) return axi_clkgen_lock_table[m]; return 0x1f1f00fa; } static const unsigned int fpfd_min = 10000; static const unsigned int fpfd_max = 300000; static const unsigned int fvco_min = 600000; static const unsigned int fvco_max = 1200000; static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout, unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout) { unsigned long d, d_min, d_max, _d_min, _d_max; unsigned long m, m_min, m_max; unsigned long f, dout, best_f, fvco; fin /= 1000; fout /= 1000; best_f = ULONG_MAX; *best_d = 0; *best_m = 0; *best_dout = 0; d_min = max_t(unsigned long, DIV_ROUND_UP(fin, fpfd_max), 1); d_max = min_t(unsigned long, fin / fpfd_min, 80); m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min, fin) * d_min, 1); m_max = min_t(unsigned long, fvco_max * d_max / fin, 64); for (m = m_min; m <= m_max; m++) { _d_min = max(d_min, DIV_ROUND_UP(fin * m, fvco_max)); _d_max = min(d_max, fin * m / fvco_min); for (d = _d_min; d <= _d_max; d++) { fvco = fin * m / d; dout = DIV_ROUND_CLOSEST(fvco, fout); dout = clamp_t(unsigned long, dout, 1, 128); f = fvco / dout; if (abs(f - fout) < abs(best_f - fout)) { best_f = f; *best_d = d; *best_m = m; *best_dout = dout; if (best_f == fout) return; } } } } static void axi_clkgen_calc_clk_params(unsigned int divider, unsigned int *low, unsigned int *high, unsigned int 
*edge, unsigned int *nocount) { if (divider == 1) *nocount = 1; else *nocount = 0; *high = divider / 2; *edge = divider % 2; *low = divider - *high; } static void axi_clkgen_write(struct axi_clkgen *axi_clkgen, unsigned int reg, unsigned int val) { writel(val, axi_clkgen->base + reg); } static void axi_clkgen_read(struct axi_clkgen *axi_clkgen, unsigned int reg, unsigned int *val) { *val = readl(axi_clkgen->base + reg); } static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw) { return container_of(clk_hw, struct axi_clkgen, clk_hw); } static int axi_clkgen_set_rate(struct clk_hw *clk_hw, unsigned long rate, unsigned long parent_rate) { struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw); unsigned int d, m, dout; unsigned int nocount; unsigned int high; unsigned int edge; unsigned int low; uint32_t filter; uint32_t lock; if (parent_rate == 0 || rate == 0) return -EINVAL; axi_clkgen_calc_params(parent_rate, rate, &d, &m, &dout); if (d == 0 || dout == 0 || m == 0) return -EINVAL; filter = axi_clkgen_lookup_filter(m - 1); lock = axi_clkgen_lookup_lock(m - 1); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 0); axi_clkgen_calc_clk_params(dout, &low, &high, &edge, &nocount); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1, (high << 6) | low); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT2, (edge << 7) | (nocount << 6)); axi_clkgen_calc_clk_params(d, &low, &high, &edge, &nocount); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV, (edge << 13) | (nocount << 12) | (high << 6) | low); axi_clkgen_calc_clk_params(m, &low, &high, &edge, &nocount); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1, (high << 6) | low); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB2, (edge << 7) | (nocount << 6)); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK1, lock & 0x3ff); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK2, (((lock >> 16) & 0x1f) << 10) | 0x1); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK3, (((lock >> 24) & 0x1f) << 
10) | 0x3e9); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER1, filter >> 16); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER2, filter); axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 1); return 0; } static long axi_clkgen_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { unsigned int d, m, dout; axi_clkgen_calc_params(*parent_rate, rate, &d, &m, &dout); if (d == 0 || dout == 0 || m == 0) return -EINVAL; return *parent_rate / d * m / dout; } static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw, unsigned long parent_rate) { struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw); unsigned int d, m, dout; unsigned int reg; unsigned long long tmp; axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1, &reg); dout = (reg & 0x3f) + ((reg >> 6) & 0x3f); axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV, &reg); d = (reg & 0x3f) + ((reg >> 6) & 0x3f); axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1, &reg); m = (reg & 0x3f) + ((reg >> 6) & 0x3f); if (d == 0 || dout == 0) return 0; tmp = (unsigned long long)(parent_rate / d) * m; do_div(tmp, dout); if (tmp > ULONG_MAX) return ULONG_MAX; return tmp; } static const struct clk_ops axi_clkgen_ops = { .recalc_rate = axi_clkgen_recalc_rate, .round_rate = axi_clkgen_round_rate, .set_rate = axi_clkgen_set_rate, }; static int axi_clkgen_probe(struct platform_device *pdev) { struct axi_clkgen *axi_clkgen; struct clk_init_data init; const char *parent_name; const char *clk_name; struct resource *mem; struct clk *clk; axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL); if (!axi_clkgen) return -ENOMEM; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(axi_clkgen->base)) return PTR_ERR(axi_clkgen->base); parent_name = of_clk_get_parent_name(pdev->dev.of_node, 0); if (!parent_name) return -EINVAL; clk_name = pdev->dev.of_node->name; of_property_read_string(pdev->dev.of_node, 
"clock-output-names", &clk_name); init.name = clk_name; init.ops = &axi_clkgen_ops; init.flags = 0; init.parent_names = &parent_name; init.num_parents = 1; axi_clkgen->clk_hw.init = &init; clk = devm_clk_register(&pdev->dev, &axi_clkgen->clk_hw); if (IS_ERR(clk)) return PTR_ERR(clk); return of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk); } static int axi_clkgen_remove(struct platform_device *pdev) { of_clk_del_provider(pdev->dev.of_node); return 0; } static const struct of_device_id axi_clkgen_ids[] = { { .compatible = "adi,axi-clkgen-1.00.a" }, { }, }; MODULE_DEVICE_TABLE(of, axi_clkgen_ids); static struct platform_driver axi_clkgen_driver = { .driver = { .name = "adi-axi-clkgen", .owner = THIS_MODULE, .of_match_table = axi_clkgen_ids, }, .probe = axi_clkgen_probe, .remove = axi_clkgen_remove, }; module_platform_driver(axi_clkgen_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("Driver for the Analog Devices' AXI clkgen pcore clock generator");
gpl-2.0
gasseluk/htc-vision-kernel-ics
drivers/net/wireless/p54/txrx.c
2821
23748
/* * Common code for mac80211 Prism54 drivers * * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de> * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> * * Based on: * - the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. * - stlc45xx driver * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <net/mac80211.h> #include "p54.h" #include "lmac.h" #ifdef P54_MM_DEBUG static void p54_dump_tx_queue(struct p54_common *priv) { unsigned long flags; struct ieee80211_tx_info *info; struct p54_tx_info *range; struct sk_buff *skb; struct p54_hdr *hdr; unsigned int i = 0; u32 prev_addr; u32 largest_hole = 0, free; spin_lock_irqsave(&priv->tx_queue.lock, flags); wiphy_debug(priv->hw->wiphy, "/ --- tx queue dump (%d entries) ---\n", skb_queue_len(&priv->tx_queue)); prev_addr = priv->rx_start; skb_queue_walk(&priv->tx_queue, skb) { info = IEEE80211_SKB_CB(skb); range = (void *) info->rate_driver_data; hdr = (void *) skb->data; free = range->start_addr - prev_addr; wiphy_debug(priv->hw->wiphy, "| [%02d] => [skb:%p skb_len:0x%04x " "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} " "mem:{start:%04x end:%04x, free:%d}]\n", i++, skb, skb->len, le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len), le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type), range->start_addr, range->end_addr, free); prev_addr = range->end_addr; largest_hole = max(largest_hole, free); } free = priv->rx_end - prev_addr; largest_hole = max(largest_hole, free); wiphy_debug(priv->hw->wiphy, "\\ --- [free: %d], largest free block: %d ---\n", free, largest_hole); 
spin_unlock_irqrestore(&priv->tx_queue.lock, flags); } #endif /* P54_MM_DEBUG */ /* * So, the firmware is somewhat stupid and doesn't know what places in its * memory incoming data should go to. By poking around in the firmware, we * can find some unused memory to upload our packets to. However, data that we * want the card to TX needs to stay intact until the card has told us that * it is done with it. This function finds empty places we can upload to and * marks allocated areas as reserved if necessary. p54_find_and_unlink_skb or * p54_free_skb frees allocated areas. */ static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb) { struct sk_buff *entry, *target_skb = NULL; struct ieee80211_tx_info *info; struct p54_tx_info *range; struct p54_hdr *data = (void *) skb->data; unsigned long flags; u32 last_addr = priv->rx_start; u32 target_addr = priv->rx_start; u16 len = priv->headroom + skb->len + priv->tailroom + 3; info = IEEE80211_SKB_CB(skb); range = (void *) info->rate_driver_data; len = (range->extra_len + len) & ~0x3; spin_lock_irqsave(&priv->tx_queue.lock, flags); if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) { /* * The tx_queue is now really full. * * TODO: check if the device has crashed and reset it. 
*/ spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return -EBUSY; } skb_queue_walk(&priv->tx_queue, entry) { u32 hole_size; info = IEEE80211_SKB_CB(entry); range = (void *) info->rate_driver_data; hole_size = range->start_addr - last_addr; if (!target_skb && hole_size >= len) { target_skb = entry->prev; hole_size -= len; target_addr = last_addr; break; } last_addr = range->end_addr; } if (unlikely(!target_skb)) { if (priv->rx_end - last_addr >= len) { target_skb = priv->tx_queue.prev; if (!skb_queue_empty(&priv->tx_queue)) { info = IEEE80211_SKB_CB(target_skb); range = (void *)info->rate_driver_data; target_addr = range->end_addr; } } else { spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return -ENOSPC; } } info = IEEE80211_SKB_CB(skb); range = (void *) info->rate_driver_data; range->start_addr = target_addr; range->end_addr = target_addr + len; data->req_id = cpu_to_le32(target_addr + priv->headroom); if (IS_DATA_FRAME(skb) && unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) priv->beacon_req_id = data->req_id; __skb_queue_after(&priv->tx_queue, target_skb, skb); spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return 0; } static void p54_tx_pending(struct p54_common *priv) { struct sk_buff *skb; int ret; skb = skb_dequeue(&priv->tx_pending); if (unlikely(!skb)) return ; ret = p54_assign_address(priv, skb); if (unlikely(ret)) skb_queue_head(&priv->tx_pending, skb); else priv->tx(priv->hw, skb); } static void p54_wake_queues(struct p54_common *priv) { unsigned long flags; unsigned int i; if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return ; p54_tx_pending(priv); spin_lock_irqsave(&priv->tx_stats_lock, flags); for (i = 0; i < priv->hw->queues; i++) { if (priv->tx_stats[i + P54_QUEUE_DATA].len < priv->tx_stats[i + P54_QUEUE_DATA].limit) ieee80211_wake_queue(priv->hw, i); } spin_unlock_irqrestore(&priv->tx_stats_lock, flags); } static int p54_tx_qos_accounting_alloc(struct p54_common *priv, struct sk_buff *skb, const u16 p54_queue) { struct 
p54_tx_queue_stats *queue; unsigned long flags; if (WARN_ON(p54_queue >= P54_QUEUE_NUM)) return -EINVAL; queue = &priv->tx_stats[p54_queue]; spin_lock_irqsave(&priv->tx_stats_lock, flags); if (unlikely(queue->len >= queue->limit && IS_QOS_QUEUE(p54_queue))) { spin_unlock_irqrestore(&priv->tx_stats_lock, flags); return -ENOSPC; } queue->len++; queue->count++; if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) { u16 ac_queue = p54_queue - P54_QUEUE_DATA; ieee80211_stop_queue(priv->hw, ac_queue); } spin_unlock_irqrestore(&priv->tx_stats_lock, flags); return 0; } static void p54_tx_qos_accounting_free(struct p54_common *priv, struct sk_buff *skb) { if (IS_DATA_FRAME(skb)) { unsigned long flags; spin_lock_irqsave(&priv->tx_stats_lock, flags); priv->tx_stats[GET_HW_QUEUE(skb)].len--; spin_unlock_irqrestore(&priv->tx_stats_lock, flags); if (unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) { if (priv->beacon_req_id == GET_REQ_ID(skb)) { /* this is the active beacon set anymore */ priv->beacon_req_id = 0; } complete(&priv->beacon_comp); } } p54_wake_queues(priv); } void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54_common *priv = dev->priv; if (unlikely(!skb)) return ; skb_unlink(skb, &priv->tx_queue); p54_tx_qos_accounting_free(priv, skb); dev_kfree_skb_any(skb); } EXPORT_SYMBOL_GPL(p54_free_skb); static struct sk_buff *p54_find_and_unlink_skb(struct p54_common *priv, const __le32 req_id) { struct sk_buff *entry; unsigned long flags; spin_lock_irqsave(&priv->tx_queue.lock, flags); skb_queue_walk(&priv->tx_queue, entry) { struct p54_hdr *hdr = (struct p54_hdr *) entry->data; if (hdr->req_id == req_id) { __skb_unlink(entry, &priv->tx_queue); spin_unlock_irqrestore(&priv->tx_queue.lock, flags); p54_tx_qos_accounting_free(priv, entry); return entry; } } spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return NULL; } void p54_tx(struct p54_common *priv, struct sk_buff *skb) { skb_queue_tail(&priv->tx_pending, skb); 
p54_tx_pending(priv); } static int p54_rssi_to_dbm(struct p54_common *priv, int rssi) { if (priv->rxhw != 5) { return ((rssi * priv->cur_rssi->mul) / 64 + priv->cur_rssi->add) / 4; } else { /* * TODO: find the correct formula */ return rssi / 2 - 110; } } /* * Even if the firmware is capable of dealing with incoming traffic, * while dozing, we have to prepared in case mac80211 uses PS-POLL * to retrieve outstanding frames from our AP. * (see comment in net/mac80211/mlme.c @ line 1993) */ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *) skb->data; struct ieee80211_tim_ie *tim_ie; u8 *tim; u8 tim_len; bool new_psm; /* only beacons have a TIM IE */ if (!ieee80211_is_beacon(hdr->frame_control)) return; if (!priv->aid) return; /* only consider beacons from the associated BSSID */ if (compare_ether_addr(hdr->addr3, priv->bssid)) return; tim = p54_find_ie(skb, WLAN_EID_TIM); if (!tim) return; tim_len = tim[1]; tim_ie = (struct ieee80211_tim_ie *) &tim[2]; new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid); if (new_psm != priv->powersave_override) { priv->powersave_override = new_psm; p54_set_ps(priv); } } static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb) { struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); u16 freq = le16_to_cpu(hdr->freq); size_t header_len = sizeof(*hdr); u32 tsf32; u8 rate = hdr->rate & 0xf; /* * If the device is in a unspecified state we have to * ignore all data frames. Else we could end up with a * nasty crash. 
*/ if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return 0; if (!(hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_IN_FCS_GOOD))) return 0; if (hdr->decrypt_status == P54_DECRYPT_OK) rx_status->flag |= RX_FLAG_DECRYPTED; if ((hdr->decrypt_status == P54_DECRYPT_FAIL_MICHAEL) || (hdr->decrypt_status == P54_DECRYPT_FAIL_TKIP)) rx_status->flag |= RX_FLAG_MMIC_ERROR; rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); if (hdr->rate & 0x10) rx_status->flag |= RX_FLAG_SHORTPRE; if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ) rx_status->rate_idx = (rate < 4) ? 0 : rate - 4; else rx_status->rate_idx = rate; rx_status->freq = freq; rx_status->band = priv->hw->conf.channel->band; rx_status->antenna = hdr->antenna; tsf32 = le32_to_cpu(hdr->tsf32); if (tsf32 < priv->tsf_low32) priv->tsf_high32++; rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32; priv->tsf_low32 = tsf32; rx_status->flag |= RX_FLAG_MACTIME_MPDU; if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) header_len += hdr->align[0]; skb_pull(skb, header_len); skb_trim(skb, le16_to_cpu(hdr->len)); if (unlikely(priv->hw->conf.flags & IEEE80211_CONF_PS)) p54_pspoll_workaround(priv, skb); ieee80211_rx_irqsafe(priv->hw, skb); ieee80211_queue_delayed_work(priv->hw, &priv->work, msecs_to_jiffies(P54_STATISTICS_UPDATE)); return -1; } static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data; struct ieee80211_tx_info *info; struct p54_hdr *entry_hdr; struct p54_tx_data *entry_data; struct sk_buff *entry; unsigned int pad = 0, frame_len; int count, idx; entry = p54_find_and_unlink_skb(priv, hdr->req_id); if (unlikely(!entry)) return ; frame_len = entry->len; info = IEEE80211_SKB_CB(entry); entry_hdr = (struct p54_hdr *) entry->data; entry_data = (struct p54_tx_data *) entry_hdr->data; priv->stats.dot11ACKFailureCount += payload->tries - 1; /* * Frames in 
P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are * generated by the driver. Therefore tx_status is bogus * and we don't want to confuse the mac80211 stack. */ if (unlikely(entry_data->hw_queue < P54_QUEUE_FWSCAN)) { dev_kfree_skb_any(entry); return ; } /* * Clear manually, ieee80211_tx_info_clear_status would * clear the counts too and we need them. */ memset(&info->status.ampdu_ack_len, 0, sizeof(struct ieee80211_tx_info) - offsetof(struct ieee80211_tx_info, status.ampdu_ack_len)); BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23); if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) pad = entry_data->align[0]; /* walk through the rates array and adjust the counts */ count = payload->tries; for (idx = 0; idx < 4; idx++) { if (count >= info->status.rates[idx].count) { count -= info->status.rates[idx].count; } else if (count > 0) { info->status.rates[idx].count = count; count = 0; } else { info->status.rates[idx].idx = -1; info->status.rates[idx].count = 0; } } if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !(payload->status & P54_TX_FAILED)) info->flags |= IEEE80211_TX_STAT_ACK; if (payload->status & P54_TX_PSM_CANCELLED) info->flags |= IEEE80211_TX_STAT_TX_FILTERED; info->status.ack_signal = p54_rssi_to_dbm(priv, (int)payload->ack_rssi); /* Undo all changes to the frame. */ switch (entry_data->key_type) { case P54_CRYPTO_TKIPMICHAEL: { u8 *iv = (u8 *)(entry_data->align + pad + entry_data->crypt_offset); /* Restore the original TKIP IV. 
*/ iv[2] = iv[0]; iv[0] = iv[1]; iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */ frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */ break; } case P54_CRYPTO_AESCCMP: frame_len -= 8; /* remove CCMP_MIC */ break; case P54_CRYPTO_WEP: frame_len -= 4; /* remove WEP_ICV */ break; } skb_trim(entry, frame_len); skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); ieee80211_tx_status_irqsafe(priv->hw, entry); } static void p54_rx_eeprom_readback(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data; struct sk_buff *tmp; if (!priv->eeprom) return ; if (priv->fw_var >= 0x509) { memcpy(priv->eeprom, eeprom->v2.data, le16_to_cpu(eeprom->v2.len)); } else { memcpy(priv->eeprom, eeprom->v1.data, le16_to_cpu(eeprom->v1.len)); } priv->eeprom = NULL; tmp = p54_find_and_unlink_skb(priv, hdr->req_id); dev_kfree_skb_any(tmp); complete(&priv->eeprom_comp); } static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_statistics *stats = (struct p54_statistics *) hdr->data; struct sk_buff *tmp; u32 tsf32; if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return ; tsf32 = le32_to_cpu(stats->tsf32); if (tsf32 < priv->tsf_low32) priv->tsf_high32++; priv->tsf_low32 = tsf32; priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail); priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success); priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs); priv->noise = p54_rssi_to_dbm(priv, le32_to_cpu(stats->noise)); tmp = p54_find_and_unlink_skb(priv, hdr->req_id); dev_kfree_skb_any(tmp); } static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_trap *trap = (struct p54_trap *) hdr->data; u16 event = le16_to_cpu(trap->event); u16 freq = le16_to_cpu(trap->frequency); switch (event) { case 
P54_TRAP_BEACON_TX: break; case P54_TRAP_RADAR: wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq); break; case P54_TRAP_NO_BEACON: if (priv->vif) ieee80211_beacon_loss(priv->vif); break; case P54_TRAP_SCAN: break; case P54_TRAP_TBTT: break; case P54_TRAP_TIMER: break; case P54_TRAP_FAA_RADIO_OFF: wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); break; case P54_TRAP_FAA_RADIO_ON: wiphy_rfkill_set_hw_state(priv->hw->wiphy, false); break; default: wiphy_info(priv->hw->wiphy, "received event:%x freq:%d\n", event, freq); break; } } static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; switch (le16_to_cpu(hdr->type)) { case P54_CONTROL_TYPE_TXDONE: p54_rx_frame_sent(priv, skb); break; case P54_CONTROL_TYPE_TRAP: p54_rx_trap(priv, skb); break; case P54_CONTROL_TYPE_BBP: break; case P54_CONTROL_TYPE_STAT_READBACK: p54_rx_stats(priv, skb); break; case P54_CONTROL_TYPE_EEPROM_READBACK: p54_rx_eeprom_readback(priv, skb); break; default: wiphy_debug(priv->hw->wiphy, "not handling 0x%02x type control frame\n", le16_to_cpu(hdr->type)); break; } return 0; } /* returns zero if skb can be reused */ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54_common *priv = dev->priv; u16 type = le16_to_cpu(*((__le16 *)skb->data)); if (type & P54_HDR_FLAG_CONTROL) return p54_rx_control(priv, skb); else return p54_rx_data(priv, skb); } EXPORT_SYMBOL_GPL(p54_rx); static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, struct ieee80211_tx_info *info, u8 *queue, u32 *extra_len, u16 *flags, u16 *aid, bool *burst_possible) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_data_qos(hdr->frame_control)) *burst_possible = true; else *burst_possible = false; if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE) *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL; if 
(info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL; *queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA; switch (priv->mode) { case NL80211_IFTYPE_MONITOR: /* * We have to set P54_HDR_FLAG_DATA_OUT_PROMISC for * every frame in promiscuous/monitor mode. * see STSW45x0C LMAC API - page 12. */ *aid = 0; *flags |= P54_HDR_FLAG_DATA_OUT_PROMISC; break; case NL80211_IFTYPE_STATION: *aid = 1; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { *aid = 0; *queue = P54_QUEUE_CAB; return; } if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) { if (ieee80211_is_probe_resp(hdr->frame_control)) { *aid = 0; *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP | P54_HDR_FLAG_DATA_OUT_NOCANCEL; return; } else if (ieee80211_is_beacon(hdr->frame_control)) { *aid = 0; if (info->flags & IEEE80211_TX_CTL_INJECTED) { /* * Injecting beacons on top of a AP is * not a good idea... nevertheless, * it should be doable. 
*/ return; } *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP; *queue = P54_QUEUE_BEACON; *extra_len = IEEE80211_MAX_TIM_LEN; return; } } if (info->control.sta) *aid = info->control.sta->aid; break; } } static u8 p54_convert_algo(u32 cipher) { switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: return P54_CRYPTO_WEP; case WLAN_CIPHER_SUITE_TKIP: return P54_CRYPTO_TKIPMICHAEL; case WLAN_CIPHER_SUITE_CCMP: return P54_CRYPTO_AESCCMP; default: return 0; } } void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54_common *priv = dev->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct p54_tx_info *p54info; struct p54_hdr *hdr; struct p54_tx_data *txhdr; unsigned int padding, len, extra_len = 0; int i, j, ridx; u16 hdr_flags = 0, aid = 0; u8 rate, queue = 0, crypt_offset = 0; u8 cts_rate = 0x20; u8 rc_flags; u8 calculated_tries[4]; u8 nrates = 0, nremaining = 8; bool burst_allowed = false; p54_tx_80211_header(priv, skb, info, &queue, &extra_len, &hdr_flags, &aid, &burst_allowed); if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { dev_kfree_skb_any(skb); return; } padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; len = skb->len; if (info->control.hw_key) { crypt_offset = ieee80211_get_hdrlen_from_skb(skb); if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { u8 *iv = (u8 *)(skb->data + crypt_offset); /* * The firmware excepts that the IV has to have * this special format */ iv[1] = iv[0]; iv[0] = iv[2]; iv[2] = 0; } } txhdr = (struct p54_tx_data *) skb_push(skb, sizeof(*txhdr) + padding); hdr = (struct p54_hdr *) skb_push(skb, sizeof(*hdr)); if (padding) hdr_flags |= P54_HDR_FLAG_DATA_ALIGN; hdr->type = cpu_to_le16(aid); hdr->rts_tries = info->control.rates[0].count; /* * we register the rates in perfect order, and * RTS/CTS won't happen on 5 GHz */ cts_rate = info->control.rts_cts_rate_idx; memset(&txhdr->rateset, 0, sizeof(txhdr->rateset)); /* see how many rates got used */ 
for (i = 0; i < dev->max_rates; i++) { if (info->control.rates[i].idx < 0) break; nrates++; } /* limit tries to 8/nrates per rate */ for (i = 0; i < nrates; i++) { /* * The magic expression here is equivalent to 8/nrates for * all values that matter, but avoids division and jumps. * Note that nrates can only take the values 1 through 4. */ calculated_tries[i] = min_t(int, ((15 >> nrates) | 1) + 1, info->control.rates[i].count); nremaining -= calculated_tries[i]; } /* if there are tries left, distribute from back to front */ for (i = nrates - 1; nremaining > 0 && i >= 0; i--) { int tmp = info->control.rates[i].count - calculated_tries[i]; if (tmp <= 0) continue; /* RC requested more tries at this rate */ tmp = min_t(int, tmp, nremaining); calculated_tries[i] += tmp; nremaining -= tmp; } ridx = 0; for (i = 0; i < nrates && ridx < 8; i++) { /* we register the rates in perfect order */ rate = info->control.rates[i].idx; if (info->band == IEEE80211_BAND_5GHZ) rate += 4; /* store the count we actually calculated for TX status */ info->control.rates[i].count = calculated_tries[i]; rc_flags = info->control.rates[i].flags; if (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) { rate |= 0x10; cts_rate |= 0x10; } if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { burst_allowed = false; rate |= 0x40; } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { rate |= 0x20; burst_allowed = false; } for (j = 0; j < calculated_tries[i] && ridx < 8; j++) { txhdr->rateset[ridx] = rate; ridx++; } } if (burst_allowed) hdr_flags |= P54_HDR_FLAG_DATA_OUT_BURST; /* TODO: enable bursting */ hdr->flags = cpu_to_le16(hdr_flags); hdr->tries = ridx; txhdr->rts_rate_idx = 0; if (info->control.hw_key) { txhdr->key_type = p54_convert_algo(info->control.hw_key->cipher); txhdr->key_len = min((u8)16, info->control.hw_key->keylen); memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len); if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { /* reserve space for the MIC key */ len += 8; 
memcpy(skb_put(skb, 8), &(info->control.hw_key->key [NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]), 8); } /* reserve some space for ICV */ len += info->control.hw_key->icv_len; memset(skb_put(skb, info->control.hw_key->icv_len), 0, info->control.hw_key->icv_len); } else { txhdr->key_type = 0; txhdr->key_len = 0; } txhdr->crypt_offset = crypt_offset; txhdr->hw_queue = queue; txhdr->backlog = priv->tx_stats[queue].len - 1; memset(txhdr->durations, 0, sizeof(txhdr->durations)); txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ? 2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask; if (priv->rxhw == 5) { txhdr->longbow.cts_rate = cts_rate; txhdr->longbow.output_power = cpu_to_le16(priv->output_power); } else { txhdr->normal.output_power = priv->output_power; txhdr->normal.cts_rate = cts_rate; } if (padding) txhdr->align[0] = padding; hdr->len = cpu_to_le16(len); /* modifies skb->cb and with it info, so must be last! */ p54info = (void *) info->rate_driver_data; p54info->extra_len = extra_len; p54_tx(priv, skb); }
gpl-2.0
invisiblek/android_kernel_lge_vs450pp
drivers/video/msm/mdp_vsync.c
2821
12660
/* Copyright (c) 2008-2009, 2012 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/hrtimer.h> #include <linux/vmalloc.h> #include <linux/clk.h> #include <mach/hardware.h> #include <linux/io.h> #include <asm/system.h> #include <asm/mach-types.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include <mach/gpio.h> #include "mdp.h" #include "msm_fb.h" #include "mddihost.h" #ifdef CONFIG_FB_MSM_MDP40 #include "mdp4.h" #define MDP_SYNC_CFG_0 0x100 #define MDP_SYNC_STATUS_0 0x10c #define MDP_SYNC_CFG_1 0x104 #define MDP_SYNC_STATUS_1 0x110 #define MDP_PRIM_VSYNC_OUT_CTRL 0x118 #define MDP_SEC_VSYNC_OUT_CTRL 0x11C #define MDP_VSYNC_SEL 0x124 #define MDP_PRIM_VSYNC_INIT_VAL 0x128 #define MDP_SEC_VSYNC_INIT_VAL 0x12C #else #define MDP_SYNC_CFG_0 0x300 #define MDP_SYNC_STATUS_0 0x30c #define MDP_PRIM_VSYNC_OUT_CTRL 0x318 #define MDP_PRIM_VSYNC_INIT_VAL 0x328 #endif extern mddi_lcd_type mddi_lcd_idx; extern spinlock_t mdp_spin_lock; extern struct workqueue_struct *mdp_vsync_wq; extern int lcdc_mode; extern int vsync_mode; #ifdef MDP_HW_VSYNC int vsync_above_th = 4; int vsync_start_th = 1; int vsync_load_cnt; int vsync_clk_status; DEFINE_MUTEX(vsync_clk_lock); static DEFINE_SPINLOCK(vsync_timer_lock); static struct clk *mdp_vsync_clk; static struct msm_fb_data_type *vsync_mfd; static unsigned char timer_shutdown_flag; static uint32 
vsync_cnt_cfg; void vsync_clk_prepare_enable(void) { if (mdp_vsync_clk) clk_prepare_enable(mdp_vsync_clk); } void vsync_clk_disable_unprepare(void) { if (mdp_vsync_clk) clk_disable_unprepare(mdp_vsync_clk); } void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd) { if (vsync_clk_status == 1) return; mutex_lock(&vsync_clk_lock); if (mfd->use_mdp_vsync) { clk_prepare_enable(mdp_vsync_clk); vsync_clk_status = 1; } mutex_unlock(&vsync_clk_lock); } void mdp_hw_vsync_clk_disable(struct msm_fb_data_type *mfd) { if (vsync_clk_status == 0) return; mutex_lock(&vsync_clk_lock); if (mfd->use_mdp_vsync) { clk_disable_unprepare(mdp_vsync_clk); vsync_clk_status = 0; } mutex_unlock(&vsync_clk_lock); } static void mdp_set_vsync(unsigned long data); void mdp_vsync_clk_enable(void) { if (vsync_mfd) { mdp_hw_vsync_clk_enable(vsync_mfd); if (!vsync_mfd->vsync_resync_timer.function) mdp_set_vsync((unsigned long) vsync_mfd); } } void mdp_vsync_clk_disable(void) { if (vsync_mfd) { if (vsync_mfd->vsync_resync_timer.function) { spin_lock(&vsync_timer_lock); timer_shutdown_flag = 1; spin_unlock(&vsync_timer_lock); del_timer_sync(&vsync_mfd->vsync_resync_timer); spin_lock(&vsync_timer_lock); timer_shutdown_flag = 0; spin_unlock(&vsync_timer_lock); vsync_mfd->vsync_resync_timer.function = NULL; } mdp_hw_vsync_clk_disable(vsync_mfd); } } #endif static void mdp_set_vsync(unsigned long data) { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data; struct msm_fb_panel_data *pdata = NULL; pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data; vsync_mfd = mfd; init_timer(&mfd->vsync_resync_timer); if ((pdata) && (pdata->set_vsync_notifier == NULL)) return; if ((mfd->panel_info.lcd.vsync_enable) && (mfd->panel_power_on) && (!mfd->vsync_handler_pending)) { mfd->vsync_handler_pending = TRUE; if (!queue_work(mdp_vsync_wq, &mfd->vsync_resync_worker)) { MSM_FB_INFO ("mdp_set_vsync: can't queue_work! 
-> needs to increase vsync_resync_timer_duration\n"); } } else { MSM_FB_DEBUG ("mdp_set_vsync failed! EN:%d PWR:%d PENDING:%d\n", mfd->panel_info.lcd.vsync_enable, mfd->panel_power_on, mfd->vsync_handler_pending); } spin_lock(&vsync_timer_lock); if (!timer_shutdown_flag) { mfd->vsync_resync_timer.function = mdp_set_vsync; mfd->vsync_resync_timer.data = data; mfd->vsync_resync_timer.expires = jiffies + mfd->panel_info.lcd.vsync_notifier_period; add_timer(&mfd->vsync_resync_timer); } spin_unlock(&vsync_timer_lock); } static void mdp_vsync_handler(void *data) { struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data; if (vsync_clk_status == 0) { pr_debug("Warning: vsync clk is disabled\n"); mfd->vsync_handler_pending = FALSE; return; } if (mfd->use_mdp_vsync) { #ifdef MDP_HW_VSYNC if (mfd->panel_power_on) { MDP_OUTP(MDP_BASE + MDP_SYNC_STATUS_0, vsync_load_cnt); #ifdef CONFIG_FB_MSM_MDP40 if (mdp_hw_revision < MDP4_REVISION_V2_1) MDP_OUTP(MDP_BASE + MDP_SYNC_STATUS_1, vsync_load_cnt); #endif } #endif } else { mfd->last_vsync_timetick = ktime_get_real(); } mfd->vsync_handler_pending = FALSE; } irqreturn_t mdp_hw_vsync_handler_proxy(int irq, void *data) { /* * ToDo: tried enabling/disabling GPIO MDP HW VSYNC interrupt * but getting inaccurate timing in mdp_vsync_handler() * disable_irq(MDP_HW_VSYNC_IRQ); */ mdp_vsync_handler(data); return IRQ_HANDLED; } #ifdef MDP_HW_VSYNC static void mdp_set_sync_cfg_0(struct msm_fb_data_type *mfd, int vsync_cnt) { unsigned long cfg; cfg = mfd->total_lcd_lines - 1; cfg <<= MDP_SYNCFG_HGT_LOC; if (mfd->panel_info.lcd.hw_vsync_mode) cfg |= MDP_SYNCFG_VSYNC_EXT_EN; cfg |= (MDP_SYNCFG_VSYNC_INT_EN | vsync_cnt); MDP_OUTP(MDP_BASE + MDP_SYNC_CFG_0, cfg); } #ifdef CONFIG_FB_MSM_MDP40 static void mdp_set_sync_cfg_1(struct msm_fb_data_type *mfd, int vsync_cnt) { unsigned long cfg; cfg = mfd->total_lcd_lines - 1; cfg <<= MDP_SYNCFG_HGT_LOC; if (mfd->panel_info.lcd.hw_vsync_mode) cfg |= MDP_SYNCFG_VSYNC_EXT_EN; cfg |= 
(MDP_SYNCFG_VSYNC_INT_EN | vsync_cnt); MDP_OUTP(MDP_BASE + MDP_SYNC_CFG_1, cfg); } #endif void mdp_vsync_cfg_regs(struct msm_fb_data_type *mfd, boolean first_time) { /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); if (first_time) mdp_hw_vsync_clk_enable(mfd); mdp_set_sync_cfg_0(mfd, vsync_cnt_cfg); #ifdef CONFIG_FB_MSM_MDP40 if (mdp_hw_revision < MDP4_REVISION_V2_1) mdp_set_sync_cfg_1(mfd, vsync_cnt_cfg); #endif /* * load the last line + 1 to be in the * safety zone */ vsync_load_cnt = mfd->panel_info.yres; /* line counter init value at the next pulse */ MDP_OUTP(MDP_BASE + MDP_PRIM_VSYNC_INIT_VAL, vsync_load_cnt); #ifdef CONFIG_FB_MSM_MDP40 if (mdp_hw_revision < MDP4_REVISION_V2_1) { MDP_OUTP(MDP_BASE + MDP_SEC_VSYNC_INIT_VAL, vsync_load_cnt); } #endif /* * external vsync source pulse width and * polarity flip */ MDP_OUTP(MDP_BASE + MDP_PRIM_VSYNC_OUT_CTRL, BIT(0)); #ifdef CONFIG_FB_MSM_MDP40 if (mdp_hw_revision < MDP4_REVISION_V2_1) { MDP_OUTP(MDP_BASE + MDP_SEC_VSYNC_OUT_CTRL, BIT(0)); MDP_OUTP(MDP_BASE + MDP_VSYNC_SEL, 0x20); } #endif /* threshold */ MDP_OUTP(MDP_BASE + 0x200, (vsync_above_th << 16) | (vsync_start_th)); if (first_time) mdp_hw_vsync_clk_disable(mfd); /* MDP cmd block disable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); } #endif void mdp_config_vsync(struct platform_device *pdev, struct msm_fb_data_type *mfd) { /* vsync on primary lcd only for now */ if ((mfd->dest != DISPLAY_LCD) || (mfd->panel_info.pdest != DISPLAY_1) || (!vsync_mode)) { goto err_handle; } vsync_clk_status = 0; if (mfd->panel_info.lcd.vsync_enable) { mfd->total_porch_lines = mfd->panel_info.lcd.v_back_porch + mfd->panel_info.lcd.v_front_porch + mfd->panel_info.lcd.v_pulse_width; mfd->total_lcd_lines = mfd->panel_info.yres + mfd->total_porch_lines; mfd->lcd_ref_usec_time = 100000000 / mfd->panel_info.lcd.refx100; mfd->vsync_handler_pending = FALSE; mfd->last_vsync_timetick.tv64 = 0; #ifdef MDP_HW_VSYNC if (mdp_vsync_clk == 
NULL) mdp_vsync_clk = clk_get(&pdev->dev, "vsync_clk"); if (IS_ERR(mdp_vsync_clk)) { printk(KERN_ERR "error: can't get mdp_vsync_clk!\n"); mfd->use_mdp_vsync = 0; } else mfd->use_mdp_vsync = 1; if (mfd->use_mdp_vsync) { uint32 vsync_cnt_cfg_dem; uint32 mdp_vsync_clk_speed_hz; mdp_vsync_clk_speed_hz = clk_get_rate(mdp_vsync_clk); if (mdp_vsync_clk_speed_hz == 0) { mfd->use_mdp_vsync = 0; } else { /* * Do this calculation in 2 steps for * rounding uint32 properly. */ vsync_cnt_cfg_dem = (mfd->panel_info.lcd.refx100 * mfd->total_lcd_lines) / 100; vsync_cnt_cfg = (mdp_vsync_clk_speed_hz) / vsync_cnt_cfg_dem; mdp_vsync_cfg_regs(mfd, TRUE); } } #else mfd->use_mdp_vsync = 0; hrtimer_init(&mfd->dma_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); mfd->dma_hrtimer.function = mdp_dma2_vsync_hrtimer_handler; mfd->vsync_width_boundary = vmalloc(mfd->panel_info.xres * 4); #endif #ifdef CONFIG_FB_MSM_MDDI mfd->channel_irq = 0; if (mfd->panel_info.lcd.hw_vsync_mode) { u32 vsync_gpio = mfd->vsync_gpio; u32 ret; if (vsync_gpio == -1) { MSM_FB_INFO("vsync_gpio not defined!\n"); goto err_handle; } ret = gpio_tlmm_config(GPIO_CFG (vsync_gpio, (mfd->use_mdp_vsync) ? 1 : 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG_ENABLE); if (ret) goto err_handle; /* * if use_mdp_vsync, then no interrupt need since * mdp_vsync is feed directly to mdp to reset the * write pointer counter. therefore no irq_handler * need to reset write pointer counter. */ if (!mfd->use_mdp_vsync) { mfd->channel_irq = MSM_GPIO_TO_INT(vsync_gpio); if (request_irq (mfd->channel_irq, &mdp_hw_vsync_handler_proxy, IRQF_TRIGGER_FALLING, "VSYNC_GPIO", (void *)mfd)) { MSM_FB_INFO ("irq=%d failed! 
vsync_gpio=%d\n", mfd->channel_irq, vsync_gpio); goto err_handle; } } } #endif mdp_hw_vsync_clk_enable(mfd); mdp_set_vsync((unsigned long)mfd); } return; err_handle: if (mfd->vsync_width_boundary) vfree(mfd->vsync_width_boundary); mfd->panel_info.lcd.vsync_enable = FALSE; printk(KERN_ERR "%s: failed!\n", __func__); } void mdp_vsync_resync_workqueue_handler(struct work_struct *work) { struct msm_fb_data_type *mfd = NULL; int vsync_fnc_enabled = FALSE; struct msm_fb_panel_data *pdata = NULL; mfd = container_of(work, struct msm_fb_data_type, vsync_resync_worker); if (mfd) { if (mfd->panel_power_on) { pdata = (struct msm_fb_panel_data *)mfd->pdev->dev. platform_data; if (pdata->set_vsync_notifier != NULL) { if (pdata->clk_func && !pdata->clk_func(2)) { mfd->vsync_handler_pending = FALSE; return; } pdata->set_vsync_notifier( mdp_vsync_handler, (void *)mfd); vsync_fnc_enabled = TRUE; } } } if ((mfd) && (!vsync_fnc_enabled)) mfd->vsync_handler_pending = FALSE; } boolean mdp_hw_vsync_set_handler(msm_fb_vsync_handler_type handler, void *data) { /* * ToDo: tried enabling/disabling GPIO MDP HW VSYNC interrupt * but getting inaccurate timing in mdp_vsync_handler() * enable_irq(MDP_HW_VSYNC_IRQ); */ return TRUE; } uint32 mdp_get_lcd_line_counter(struct msm_fb_data_type *mfd) { uint32 elapsed_usec_time; uint32 lcd_line; ktime_t last_vsync_timetick_local; ktime_t curr_time; unsigned long flag; if ((!mfd->panel_info.lcd.vsync_enable) || (!vsync_mode)) return 0; spin_lock_irqsave(&mdp_spin_lock, flag); last_vsync_timetick_local = mfd->last_vsync_timetick; spin_unlock_irqrestore(&mdp_spin_lock, flag); curr_time = ktime_get_real(); elapsed_usec_time = ktime_to_us(ktime_sub(curr_time, last_vsync_timetick_local)); elapsed_usec_time = elapsed_usec_time % mfd->lcd_ref_usec_time; /* lcd line calculation referencing to line counter = 0 */ lcd_line = (elapsed_usec_time * mfd->total_lcd_lines) / mfd->lcd_ref_usec_time; /* lcd line adjusment referencing to the actual line counter at vsync */ 
lcd_line = (mfd->total_lcd_lines - mfd->panel_info.lcd.v_back_porch + lcd_line) % (mfd->total_lcd_lines + 1); if (lcd_line > mfd->total_lcd_lines) { MSM_FB_INFO ("mdp_get_lcd_line_counter: mdp_lcd_rd_cnt >= mfd->total_lcd_lines error!\n"); } return lcd_line; }
gpl-2.0
MoKee/android_kernel_samsung_smdk4x12
arch/arm/plat-versatile/sched-clock.c
3077
1741
/*
 *  linux/arch/arm/plat-versatile/sched-clock.c
 *
 *  Copyright (C) 1999 - 2003 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/io.h>
#include <linux/sched.h>
#include <asm/sched_clock.h>

#include <plat/sched_clock.h>

static DEFINE_CLOCK_DATA(cd);

/* MMIO address of the free-running counter; NULL until init. */
static void __iomem *ctr;

/*
 * Constants generated by clocks_calc_mult_shift(m, s, 24MHz, NSEC_PER_SEC, 60).
 * This gives a resolution of about 41ns and a wrap period of about 178s.
 */
#define SC_MULT		2796202667u
#define SC_SHIFT	26

/*
 * Scheduler clock in nanoseconds.  Returns 0 until the counter has been
 * registered via versatile_sched_clock_init().
 */
unsigned long long notrace sched_clock(void)
{
	if (!ctr)
		return 0;

	return cyc_to_fixed_sched_clock(&cd, readl(ctr), (u32)~0,
					SC_MULT, SC_SHIFT);
}

/* Periodic callback keeping the clock data ahead of counter wrap. */
static void notrace versatile_update_sched_clock(void)
{
	update_sched_clock(&cd, readl(ctr), (u32)~0);
}

/*
 * Register the 32-bit counter at @reg, ticking at @rate Hz, as the
 * sched_clock source.
 */
void __init versatile_sched_clock_init(void __iomem *reg, unsigned long rate)
{
	ctr = reg;
	init_fixed_sched_clock(&cd, versatile_update_sched_clock,
			       32, rate, SC_MULT, SC_SHIFT);
}
gpl-2.0
Hashcode/android_kernel_samsung-jf-common
drivers/media/video/atmel-isi.c
4869
28541
/* * Copyright (c) 2011 Atmel Corporation * Josh Wu, <josh.wu@atmel.com> * * Based on previous work by Lars Haring, <lars.haring@atmel.com> * and Sedji Gaouaou * Based on the bttv driver for Bt848 with respective copyright holders * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <media/atmel-isi.h> #include <media/soc_camera.h> #include <media/soc_mediabus.h> #include <media/videobuf2-dma-contig.h> #define MAX_BUFFER_NUM 32 #define MAX_SUPPORT_WIDTH 2048 #define MAX_SUPPORT_HEIGHT 2048 #define VID_LIMIT_BYTES (16 * 1024 * 1024) #define MIN_FRAME_RATE 15 #define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE) /* ISI states */ enum { ISI_STATE_IDLE = 0, ISI_STATE_READY, ISI_STATE_WAIT_SOF, }; /* Frame buffer descriptor */ struct fbd { /* Physical address of the frame buffer */ u32 fb_address; /* DMA Control Register(only in HISI2) */ u32 dma_ctrl; /* Physical address of the next fbd */ u32 next_fbd_address; }; static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl) { fb_desc->dma_ctrl = ctrl; } struct isi_dma_desc { struct list_head list; struct fbd *p_fbd; u32 fbd_phys; }; /* Frame buffer data */ struct frame_buffer { struct vb2_buffer vb; struct isi_dma_desc *p_dma_desc; struct list_head list; }; struct atmel_isi { /* Protects the access of variables shared with the ISR */ spinlock_t lock; void __iomem *regs; int sequence; /* State of the ISI module in capturing mode */ int state; /* Wait queue for waiting for SOF */ wait_queue_head_t vsync_wq; struct vb2_alloc_ctx *alloc_ctx; /* Allocate descriptors for dma buffer use */ struct fbd *p_fb_descriptors; u32 
fb_descriptors_phys; struct list_head dma_desc_head; struct isi_dma_desc dma_desc[MAX_BUFFER_NUM]; struct completion complete; /* ISI peripherial clock */ struct clk *pclk; /* ISI_MCK, feed to camera sensor to generate pixel clock */ struct clk *mck; unsigned int irq; struct isi_platform_data *pdata; u16 width_flags; /* max 12 bits */ struct list_head video_buffer_list; struct frame_buffer *active; struct soc_camera_device *icd; struct soc_camera_host soc_host; }; static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val) { writel(val, isi->regs + reg); } static u32 isi_readl(struct atmel_isi *isi, u32 reg) { return readl(isi->regs + reg); } static int configure_geometry(struct atmel_isi *isi, u32 width, u32 height, enum v4l2_mbus_pixelcode code) { u32 cfg2, cr; switch (code) { /* YUV, including grey */ case V4L2_MBUS_FMT_Y8_1X8: cr = ISI_CFG2_GRAYSCALE; break; case V4L2_MBUS_FMT_UYVY8_2X8: cr = ISI_CFG2_YCC_SWAP_MODE_3; break; case V4L2_MBUS_FMT_VYUY8_2X8: cr = ISI_CFG2_YCC_SWAP_MODE_2; break; case V4L2_MBUS_FMT_YUYV8_2X8: cr = ISI_CFG2_YCC_SWAP_MODE_1; break; case V4L2_MBUS_FMT_YVYU8_2X8: cr = ISI_CFG2_YCC_SWAP_DEFAULT; break; /* RGB, TODO */ default: return -EINVAL; } isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); cfg2 = isi_readl(isi, ISI_CFG2); cfg2 |= cr; /* Set width */ cfg2 &= ~(ISI_CFG2_IM_HSIZE_MASK); cfg2 |= ((width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) & ISI_CFG2_IM_HSIZE_MASK; /* Set height */ cfg2 &= ~(ISI_CFG2_IM_VSIZE_MASK); cfg2 |= ((height - 1) << ISI_CFG2_IM_VSIZE_OFFSET) & ISI_CFG2_IM_VSIZE_MASK; isi_writel(isi, ISI_CFG2, cfg2); return 0; } static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi) { if (isi->active) { struct vb2_buffer *vb = &isi->active->vb; struct frame_buffer *buf = isi->active; list_del_init(&buf->list); do_gettimeofday(&vb->v4l2_buf.timestamp); vb->v4l2_buf.sequence = isi->sequence++; vb2_buffer_done(vb, VB2_BUF_STATE_DONE); } if (list_empty(&isi->video_buffer_list)) { isi->active = NULL; } else { /* start next dma 
frame. */ isi->active = list_entry(isi->video_buffer_list.next, struct frame_buffer, list); isi_writel(isi, ISI_DMA_C_DSCR, isi->active->p_dma_desc->fbd_phys); isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH); } return IRQ_HANDLED; } /* ISI interrupt service routine */ static irqreturn_t isi_interrupt(int irq, void *dev_id) { struct atmel_isi *isi = dev_id; u32 status, mask, pending; irqreturn_t ret = IRQ_NONE; spin_lock(&isi->lock); status = isi_readl(isi, ISI_STATUS); mask = isi_readl(isi, ISI_INTMASK); pending = status & mask; if (pending & ISI_CTRL_SRST) { complete(&isi->complete); isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST); ret = IRQ_HANDLED; } else if (pending & ISI_CTRL_DIS) { complete(&isi->complete); isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS); ret = IRQ_HANDLED; } else { if ((pending & ISI_SR_VSYNC) && (isi->state == ISI_STATE_IDLE)) { isi->state = ISI_STATE_READY; wake_up_interruptible(&isi->vsync_wq); ret = IRQ_HANDLED; } if (likely(pending & ISI_SR_CXFR_DONE)) ret = atmel_isi_handle_streaming(isi); } spin_unlock(&isi->lock); return ret; } #define WAIT_ISI_RESET 1 #define WAIT_ISI_DISABLE 0 static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset) { unsigned long timeout; /* * The reset or disable will only succeed if we have a * pixel clock from the camera. 
*/ init_completion(&isi->complete); if (wait_reset) { isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST); isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST); } else { isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS); isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); } timeout = wait_for_completion_timeout(&isi->complete, msecs_to_jiffies(100)); if (timeout == 0) return -ETIMEDOUT; return 0; } /* ------------------------------------------------------------------ Videobuf operations ------------------------------------------------------------------*/ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct soc_camera_device *icd = soc_camera_from_vb2q(vq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; unsigned long size; int ret, bytes_per_line; /* Reset ISI */ ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET); if (ret < 0) { dev_err(icd->parent, "Reset ISI timed out\n"); return ret; } /* Disable all interrupts */ isi_writel(isi, ISI_INTDIS, ~0UL); bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, icd->current_fmt->host_fmt); if (bytes_per_line < 0) return bytes_per_line; size = bytes_per_line * icd->user_height; if (!*nbuffers || *nbuffers > MAX_BUFFER_NUM) *nbuffers = MAX_BUFFER_NUM; if (size * *nbuffers > VID_LIMIT_BYTES) *nbuffers = VID_LIMIT_BYTES / size; *nplanes = 1; sizes[0] = size; alloc_ctxs[0] = isi->alloc_ctx; isi->sequence = 0; isi->active = NULL; dev_dbg(icd->parent, "%s, count=%d, size=%ld\n", __func__, *nbuffers, size); return 0; } static int buffer_init(struct vb2_buffer *vb) { struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb); buf->p_dma_desc = NULL; INIT_LIST_HEAD(&buf->list); return 0; } static int buffer_prepare(struct vb2_buffer *vb) { struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb); struct 
soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; unsigned long size; struct isi_dma_desc *desc; int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, icd->current_fmt->host_fmt); if (bytes_per_line < 0) return bytes_per_line; size = bytes_per_line * icd->user_height; if (vb2_plane_size(vb, 0) < size) { dev_err(icd->parent, "%s data will not fit into plane (%lu < %lu)\n", __func__, vb2_plane_size(vb, 0), size); return -EINVAL; } vb2_set_plane_payload(&buf->vb, 0, size); if (!buf->p_dma_desc) { if (list_empty(&isi->dma_desc_head)) { dev_err(icd->parent, "Not enough dma descriptors.\n"); return -EINVAL; } else { /* Get an available descriptor */ desc = list_entry(isi->dma_desc_head.next, struct isi_dma_desc, list); /* Delete the descriptor since now it is used */ list_del_init(&desc->list); /* Initialize the dma descriptor */ desc->p_fbd->fb_address = vb2_dma_contig_plane_dma_addr(vb, 0); desc->p_fbd->next_fbd_address = 0; set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB); buf->p_dma_desc = desc; } } return 0; } static void buffer_cleanup(struct vb2_buffer *vb) { struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb); /* This descriptor is available now and we add to head list */ if (buf->p_dma_desc) list_add(&buf->p_dma_desc->list, &isi->dma_desc_head); } static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer) { u32 ctrl, cfg1; cfg1 = isi_readl(isi, ISI_CFG1); /* Enable irq: cxfr for the codec path, pxfr for the preview path */ isi_writel(isi, ISI_INTEN, ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE); /* Check if already in a frame */ if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) { dev_err(isi->icd->parent, "Already in frame handling.\n"); return; } isi_writel(isi, ISI_DMA_C_DSCR, buffer->p_dma_desc->fbd_phys); isi_writel(isi, 
ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH); /* Enable linked list */ cfg1 |= isi->pdata->frate | ISI_CFG1_DISCR; /* Enable codec path and ISI */ ctrl = ISI_CTRL_CDC | ISI_CTRL_EN; isi_writel(isi, ISI_CTRL, ctrl); isi_writel(isi, ISI_CFG1, cfg1); } static void buffer_queue(struct vb2_buffer *vb) { struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; struct frame_buffer *buf = container_of(vb, struct frame_buffer, vb); unsigned long flags = 0; spin_lock_irqsave(&isi->lock, flags); list_add_tail(&buf->list, &isi->video_buffer_list); if (isi->active == NULL) { isi->active = buf; if (vb2_is_streaming(vb->vb2_queue)) start_dma(isi, buf); } spin_unlock_irqrestore(&isi->lock, flags); } static int start_streaming(struct vb2_queue *vq, unsigned int count) { struct soc_camera_device *icd = soc_camera_from_vb2q(vq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; u32 sr = 0; int ret; spin_lock_irq(&isi->lock); isi->state = ISI_STATE_IDLE; /* Clear any pending SOF interrupt */ sr = isi_readl(isi, ISI_STATUS); /* Enable VSYNC interrupt for SOF */ isi_writel(isi, ISI_INTEN, ISI_SR_VSYNC); isi_writel(isi, ISI_CTRL, ISI_CTRL_EN); spin_unlock_irq(&isi->lock); dev_dbg(icd->parent, "Waiting for SOF\n"); ret = wait_event_interruptible(isi->vsync_wq, isi->state != ISI_STATE_IDLE); if (ret) goto err; if (isi->state != ISI_STATE_READY) { ret = -EIO; goto err; } spin_lock_irq(&isi->lock); isi->state = ISI_STATE_WAIT_SOF; isi_writel(isi, ISI_INTDIS, ISI_SR_VSYNC); if (count) start_dma(isi, isi->active); spin_unlock_irq(&isi->lock); return 0; err: isi->active = NULL; isi->sequence = 0; INIT_LIST_HEAD(&isi->video_buffer_list); return ret; } /* abort streaming and wait for last buffer */ static int stop_streaming(struct vb2_queue *vq) { struct soc_camera_device *icd = 
soc_camera_from_vb2q(vq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; struct frame_buffer *buf, *node; int ret = 0; unsigned long timeout; spin_lock_irq(&isi->lock); isi->active = NULL; /* Release all active buffers */ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) { list_del_init(&buf->list); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); } spin_unlock_irq(&isi->lock); timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ; /* Wait until the end of the current frame. */ while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) && time_before(jiffies, timeout)) msleep(1); if (time_after(jiffies, timeout)) { dev_err(icd->parent, "Timeout waiting for finishing codec request\n"); return -ETIMEDOUT; } /* Disable interrupts */ isi_writel(isi, ISI_INTDIS, ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE); /* Disable ISI and wait for it is done */ ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE); if (ret < 0) dev_err(icd->parent, "Disable ISI timed out\n"); return ret; } static struct vb2_ops isi_video_qops = { .queue_setup = queue_setup, .buf_init = buffer_init, .buf_prepare = buffer_prepare, .buf_cleanup = buffer_cleanup, .buf_queue = buffer_queue, .start_streaming = start_streaming, .stop_streaming = stop_streaming, .wait_prepare = soc_camera_unlock, .wait_finish = soc_camera_lock, }; /* ------------------------------------------------------------------ SOC camera operations for the device ------------------------------------------------------------------*/ static int isi_camera_init_videobuf(struct vb2_queue *q, struct soc_camera_device *icd) { q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP; q->drv_priv = icd; q->buf_struct_size = sizeof(struct frame_buffer); q->ops = &isi_video_qops; q->mem_ops = &vb2_dma_contig_memops; return vb2_queue_init(q); } static int isi_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct 
atmel_isi *isi = ici->priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { dev_warn(icd->parent, "Format %x not found\n", pix->pixelformat); return -EINVAL; } dev_dbg(icd->parent, "Plan to set format %dx%d\n", pix->width, pix->height); mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; if (mf.code != xlate->code) return -EINVAL; ret = configure_geometry(isi, pix->width, pix->height, xlate->code); if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->field = mf.field; pix->colorspace = mf.colorspace; icd->current_fmt = xlate; dev_dbg(icd->parent, "Finally set format %dx%d\n", pix->width, pix->height); return ret; } static int isi_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; u32 pixfmt = pix->pixelformat; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (pixfmt && !xlate) { dev_warn(icd->parent, "Format %x not found\n", pixfmt); return -EINVAL; } /* limit to Atmel ISI hardware capabilities */ if (pix->height > MAX_SUPPORT_HEIGHT) pix->height = MAX_SUPPORT_HEIGHT; if (pix->width > MAX_SUPPORT_WIDTH) pix->width = MAX_SUPPORT_WIDTH; /* limit to sensor capabilities */ mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf); if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->colorspace = mf.colorspace; switch (mf.field) { case V4L2_FIELD_ANY: 
pix->field = V4L2_FIELD_NONE; break; case V4L2_FIELD_NONE: break; default: dev_err(icd->parent, "Field type %d unsupported.\n", mf.field); ret = -EINVAL; } return ret; } static const struct soc_mbus_pixelfmt isi_camera_formats[] = { { .fourcc = V4L2_PIX_FMT_YUYV, .name = "Packed YUV422 16 bit", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, }; /* This will be corrected as we get more formats */ static bool isi_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt) { return fmt->packing == SOC_MBUS_PACKING_NONE || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || (fmt->bits_per_sample > 8 && fmt->packing == SOC_MBUS_PACKING_EXTEND16); } #define ISI_BUS_PARAM (V4L2_MBUS_MASTER | \ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \ V4L2_MBUS_HSYNC_ACTIVE_LOW | \ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \ V4L2_MBUS_VSYNC_ACTIVE_LOW | \ V4L2_MBUS_PCLK_SAMPLE_RISING | \ V4L2_MBUS_PCLK_SAMPLE_FALLING | \ V4L2_MBUS_DATA_ACTIVE_HIGH) static int isi_camera_try_bus_param(struct soc_camera_device *icd, unsigned char buswidth) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; unsigned long common_flags; int ret; ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); if (!ret) { common_flags = soc_mbus_config_compatible(&cfg, ISI_BUS_PARAM); if (!common_flags) { dev_warn(icd->parent, "Flags incompatible: camera 0x%x, host 0x%x\n", cfg.flags, ISI_BUS_PARAM); return -EINVAL; } } else if (ret != -ENOIOCTLCMD) { return ret; } if ((1 << (buswidth - 1)) & isi->width_flags) return 0; return -EINVAL; } static int isi_camera_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); int formats = 0, ret; /* sensor format */ enum v4l2_mbus_pixelcode code; /* soc camera host format */ 
const struct soc_mbus_pixelfmt *fmt; ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); if (ret < 0) /* No more formats */ return 0; fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { dev_err(icd->parent, "Invalid format code #%u: %d\n", idx, code); return 0; } /* This also checks support for the requested bits-per-sample */ ret = isi_camera_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) { dev_err(icd->parent, "Fail to try the bus parameters.\n"); return 0; } switch (code) { case V4L2_MBUS_FMT_UYVY8_2X8: case V4L2_MBUS_FMT_VYUY8_2X8: case V4L2_MBUS_FMT_YUYV8_2X8: case V4L2_MBUS_FMT_YVYU8_2X8: formats++; if (xlate) { xlate->host_fmt = &isi_camera_formats[0]; xlate->code = code; xlate++; dev_dbg(icd->parent, "Providing format %s using code %d\n", isi_camera_formats[0].name, code); } break; default: if (!isi_camera_packing_supported(fmt)) return 0; if (xlate) dev_dbg(icd->parent, "Providing format %s in pass-through mode\n", fmt->name); } /* Generic pass-through */ formats++; if (xlate) { xlate->host_fmt = fmt; xlate->code = code; xlate++; } return formats; } /* Called with .video_lock held */ static int isi_camera_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; int ret; if (isi->icd) return -EBUSY; ret = clk_enable(isi->pclk); if (ret) return ret; ret = clk_enable(isi->mck); if (ret) { clk_disable(isi->pclk); return ret; } isi->icd = icd; dev_dbg(icd->parent, "Atmel ISI Camera driver attached to camera %d\n", icd->devnum); return 0; } /* Called with .video_lock held */ static void isi_camera_remove_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; BUG_ON(icd != isi->icd); clk_disable(isi->mck); clk_disable(isi->pclk); isi->icd = NULL; dev_dbg(icd->parent, "Atmel ISI Camera driver detached from camera %d\n", icd->devnum); } static unsigned int isi_camera_poll(struct file 
*file, poll_table *pt) { struct soc_camera_device *icd = file->private_data; return vb2_poll(&icd->vb2_vidq, file, pt); } static int isi_camera_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { strcpy(cap->driver, "atmel-isi"); strcpy(cap->card, "Atmel Image Sensor Interface"); cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING); return 0; } static int isi_camera_set_bus_param(struct soc_camera_device *icd) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct atmel_isi *isi = ici->priv; struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; unsigned long common_flags; int ret; u32 cfg1 = 0; ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); if (!ret) { common_flags = soc_mbus_config_compatible(&cfg, ISI_BUS_PARAM); if (!common_flags) { dev_warn(icd->parent, "Flags incompatible: camera 0x%x, host 0x%x\n", cfg.flags, ISI_BUS_PARAM); return -EINVAL; } } else if (ret != -ENOIOCTLCMD) { return ret; } else { common_flags = ISI_BUS_PARAM; } dev_dbg(icd->parent, "Flags cam: 0x%x host: 0x%x common: 0x%lx\n", cfg.flags, ISI_BUS_PARAM, common_flags); /* Make choises, based on platform preferences */ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) && (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) { if (isi->pdata->hsync_act_low) common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH; else common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW; } if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) && (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) { if (isi->pdata->vsync_act_low) common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH; else common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW; } if ((common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING) && (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)) { if (isi->pdata->pclk_act_falling) common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_RISING; else common_flags &= ~V4L2_MBUS_PCLK_SAMPLE_FALLING; } cfg.flags = common_flags; ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg); 
if (ret < 0 && ret != -ENOIOCTLCMD) { dev_dbg(icd->parent, "camera s_mbus_config(0x%lx) returned %d\n", common_flags, ret); return ret; } /* set bus param for ISI */ if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW; if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW; if (common_flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING; if (isi->pdata->has_emb_sync) cfg1 |= ISI_CFG1_EMB_SYNC; if (isi->pdata->full_mode) cfg1 |= ISI_CFG1_FULL_MODE; isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); isi_writel(isi, ISI_CFG1, cfg1); return 0; } static struct soc_camera_host_ops isi_soc_camera_host_ops = { .owner = THIS_MODULE, .add = isi_camera_add_device, .remove = isi_camera_remove_device, .set_fmt = isi_camera_set_fmt, .try_fmt = isi_camera_try_fmt, .get_formats = isi_camera_get_formats, .init_videobuf2 = isi_camera_init_videobuf, .poll = isi_camera_poll, .querycap = isi_camera_querycap, .set_bus_param = isi_camera_set_bus_param, }; /* -----------------------------------------------------------------------*/ static int __devexit atmel_isi_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct atmel_isi *isi = container_of(soc_host, struct atmel_isi, soc_host); free_irq(isi->irq, isi); soc_camera_host_unregister(soc_host); vb2_dma_contig_cleanup_ctx(isi->alloc_ctx); dma_free_coherent(&pdev->dev, sizeof(struct fbd) * MAX_BUFFER_NUM, isi->p_fb_descriptors, isi->fb_descriptors_phys); iounmap(isi->regs); clk_unprepare(isi->mck); clk_put(isi->mck); clk_unprepare(isi->pclk); clk_put(isi->pclk); kfree(isi); return 0; } static int __devinit atmel_isi_probe(struct platform_device *pdev) { unsigned int irq; struct atmel_isi *isi; struct clk *pclk; struct resource *regs; int ret, i; struct device *dev = &pdev->dev; struct soc_camera_host *soc_host; struct isi_platform_data *pdata; pdata = dev->platform_data; if (!pdata || 
!pdata->data_width_flags || !pdata->mck_hz) { dev_err(&pdev->dev, "No config available for Atmel ISI\n"); return -EINVAL; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) return -ENXIO; pclk = clk_get(&pdev->dev, "isi_clk"); if (IS_ERR(pclk)) return PTR_ERR(pclk); ret = clk_prepare(pclk); if (ret) goto err_clk_prepare_pclk; isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL); if (!isi) { ret = -ENOMEM; dev_err(&pdev->dev, "Can't allocate interface!\n"); goto err_alloc_isi; } isi->pclk = pclk; isi->pdata = pdata; isi->active = NULL; spin_lock_init(&isi->lock); init_waitqueue_head(&isi->vsync_wq); INIT_LIST_HEAD(&isi->video_buffer_list); INIT_LIST_HEAD(&isi->dma_desc_head); /* Get ISI_MCK, provided by programmable clock or external clock */ isi->mck = clk_get(dev, "isi_mck"); if (IS_ERR(isi->mck)) { dev_err(dev, "Failed to get isi_mck\n"); ret = PTR_ERR(isi->mck); goto err_clk_get; } ret = clk_prepare(isi->mck); if (ret) goto err_clk_prepare_mck; /* Set ISI_MCK's frequency, it should be faster than pixel clock */ ret = clk_set_rate(isi->mck, pdata->mck_hz); if (ret < 0) goto err_set_mck_rate; isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev, sizeof(struct fbd) * MAX_BUFFER_NUM, &isi->fb_descriptors_phys, GFP_KERNEL); if (!isi->p_fb_descriptors) { ret = -ENOMEM; dev_err(&pdev->dev, "Can't allocate descriptors!\n"); goto err_alloc_descriptors; } for (i = 0; i < MAX_BUFFER_NUM; i++) { isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i; isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys + i * sizeof(struct fbd); list_add(&isi->dma_desc[i].list, &isi->dma_desc_head); } isi->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(isi->alloc_ctx)) { ret = PTR_ERR(isi->alloc_ctx); goto err_alloc_ctx; } isi->regs = ioremap(regs->start, resource_size(regs)); if (!isi->regs) { ret = -ENOMEM; goto err_ioremap; } if (pdata->data_width_flags & ISI_DATAWIDTH_8) isi->width_flags = 1 << 7; if (pdata->data_width_flags & ISI_DATAWIDTH_10) isi->width_flags 
|= 1 << 9; isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto err_req_irq; } ret = request_irq(irq, isi_interrupt, 0, "isi", isi); if (ret) { dev_err(&pdev->dev, "Unable to request irq %d\n", irq); goto err_req_irq; } isi->irq = irq; soc_host = &isi->soc_host; soc_host->drv_name = "isi-camera"; soc_host->ops = &isi_soc_camera_host_ops; soc_host->priv = isi; soc_host->v4l2_dev.dev = &pdev->dev; soc_host->nr = pdev->id; ret = soc_camera_host_register(soc_host); if (ret) { dev_err(&pdev->dev, "Unable to register soc camera host\n"); goto err_register_soc_camera_host; } return 0; err_register_soc_camera_host: free_irq(isi->irq, isi); err_req_irq: iounmap(isi->regs); err_ioremap: vb2_dma_contig_cleanup_ctx(isi->alloc_ctx); err_alloc_ctx: dma_free_coherent(&pdev->dev, sizeof(struct fbd) * MAX_BUFFER_NUM, isi->p_fb_descriptors, isi->fb_descriptors_phys); err_alloc_descriptors: err_set_mck_rate: clk_unprepare(isi->mck); err_clk_prepare_mck: clk_put(isi->mck); err_clk_get: kfree(isi); err_alloc_isi: clk_unprepare(pclk); err_clk_prepare_pclk: clk_put(pclk); return ret; } static struct platform_driver atmel_isi_driver = { .probe = atmel_isi_probe, .remove = __devexit_p(atmel_isi_remove), .driver = { .name = "atmel_isi", .owner = THIS_MODULE, }, }; static int __init atmel_isi_init_module(void) { return platform_driver_probe(&atmel_isi_driver, &atmel_isi_probe); } static void __exit atmel_isi_exit(void) { platform_driver_unregister(&atmel_isi_driver); } module_init(atmel_isi_init_module); module_exit(atmel_isi_exit); MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>"); MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("video");
gpl-2.0
ashishkrishnan/android_kernel_samsung_smdk4412
drivers/char/ppdev.c
7941
19633
/* * linux/drivers/char/ppdev.c * * This is the code behind /dev/parport* -- it allows a user-space * application to use the parport subsystem. * * Copyright (C) 1998-2000, 2002 Tim Waugh <tim@cyberelk.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * A /dev/parportx device node represents an arbitrary device * on port 'x'. The following operations are possible: * * open do nothing, set up default IEEE 1284 protocol to be COMPAT * close release port and unregister device (if necessary) * ioctl * EXCL register device exclusively (may fail) * CLAIM (register device first time) parport_claim_or_block * RELEASE parport_release * SETMODE set the IEEE 1284 protocol to use for read/write * SETPHASE set the IEEE 1284 phase of a particular mode. Not to be * confused with ioctl(fd, SETPHASER, &stun). ;-) * DATADIR data_forward / data_reverse * WDATA write_data * RDATA read_data * WCONTROL write_control * RCONTROL read_control * FCONTROL frob_control * RSTATUS read_status * NEGOT parport_negotiate * YIELD parport_yield_blocking * WCTLONIRQ on interrupt, set control lines * CLRIRQ clear (and return) interrupt count * SETTIME sets device timeout (struct timeval) * GETTIME gets device timeout (struct timeval) * GETMODES gets hardware supported modes (unsigned int) * GETMODE gets the current IEEE1284 mode * GETPHASE gets the current IEEE1284 phase * GETFLAGS gets current (user-visible) flags * SETFLAGS sets current (user-visible) flags * read/write read or write in current IEEE 1284 protocol * select wait for interrupt (in readfds) * * Changes: * Added SETTIME/GETTIME ioctl, Fred Barnes, 1999. 
* * Arnaldo Carvalho de Melo <acme@conectiva.com.br> 2000/08/25 * - On error, copy_from_user and copy_to_user do not return -EFAULT, * They return the positive number of bytes *not* copied due to address * space errors. * * Added GETMODES/GETMODE/GETPHASE ioctls, Fred Barnes <frmb2@ukc.ac.uk>, 03/01/2001. * Added GETFLAGS/SETFLAGS ioctls, Fred Barnes, 04/2001 */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/ioctl.h> #include <linux/parport.h> #include <linux/ctype.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/major.h> #include <linux/ppdev.h> #include <linux/mutex.h> #include <linux/uaccess.h> #define PP_VERSION "ppdev: user-space parallel port driver" #define CHRDEV "ppdev" struct pp_struct { struct pardevice * pdev; wait_queue_head_t irq_wait; atomic_t irqc; unsigned int flags; int irqresponse; unsigned char irqctl; struct ieee1284_info state; struct ieee1284_info saved_state; long default_inactivity; }; /* pp_struct.flags bitfields */ #define PP_CLAIMED (1<<0) #define PP_EXCL (1<<1) /* Other constants */ #define PP_INTERRUPT_TIMEOUT (10 * HZ) /* 10s */ #define PP_BUFFER_SIZE 1024 #define PARDEVICE_MAX 8 /* ROUND_UP macro from fs/select.c */ #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) static DEFINE_MUTEX(pp_do_mutex); static inline void pp_enable_irq (struct pp_struct *pp) { struct parport *port = pp->pdev->port; port->ops->enable_irq (port); } static ssize_t pp_read (struct file * file, char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_read = 0; struct parport *pport; int mode; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } /* Trivial case. 
*/ if (count == 0) return 0; kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_read == 0) { ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE); if (mode == IEEE1284_MODE_EPP) { /* various specials for EPP mode */ int flags = 0; size_t (*fn)(struct parport *, void *, size_t, int); if (pp->flags & PP_W91284PIC) { flags |= PARPORT_W91284PIC; } if (pp->flags & PP_FASTREAD) { flags |= PARPORT_EPP_FAST; } if (pport->ieee1284.mode & IEEE1284_ADDR) { fn = pport->ops->epp_read_addr; } else { fn = pport->ops->epp_read_data; } bytes_read = (*fn)(pport, kbuffer, need, flags); } else { bytes_read = parport_read (pport, kbuffer, need); } if (bytes_read != 0) break; if (file->f_flags & O_NONBLOCK) { bytes_read = -EAGAIN; break; } if (signal_pending (current)) { bytes_read = -ERESTARTSYS; break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read)) bytes_read = -EFAULT; kfree (kbuffer); pp_enable_irq (pp); return bytes_read; } static ssize_t pp_write (struct file * file, const char __user * buf, size_t count, loff_t * ppos) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; char * kbuffer; ssize_t bytes_written = 0; ssize_t wrote; int mode; struct parport *pport; if (!(pp->flags & PP_CLAIMED)) { /* Don't have the port claimed */ pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL); if (!kbuffer) { return -ENOMEM; } pport = pp->pdev->port; mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR); parport_set_timeout (pp->pdev, (file->f_flags & O_NONBLOCK) ? 
PARPORT_INACTIVITY_O_NONBLOCK : pp->default_inactivity); while (bytes_written < count) { ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE); if (copy_from_user (kbuffer, buf + bytes_written, n)) { bytes_written = -EFAULT; break; } if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { /* do a fast EPP write */ if (pport->ieee1284.mode & IEEE1284_ADDR) { wrote = pport->ops->epp_write_addr (pport, kbuffer, n, PARPORT_EPP_FAST); } else { wrote = pport->ops->epp_write_data (pport, kbuffer, n, PARPORT_EPP_FAST); } } else { wrote = parport_write (pp->pdev->port, kbuffer, n); } if (wrote <= 0) { if (!bytes_written) { bytes_written = wrote; } break; } bytes_written += wrote; if (file->f_flags & O_NONBLOCK) { if (!bytes_written) bytes_written = -EAGAIN; break; } if (signal_pending (current)) { if (!bytes_written) { bytes_written = -EINTR; } break; } cond_resched(); } parport_set_timeout (pp->pdev, pp->default_inactivity); kfree (kbuffer); pp_enable_irq (pp); return bytes_written; } static void pp_irq (void *private) { struct pp_struct *pp = private; if (pp->irqresponse) { parport_write_control (pp->pdev->port, pp->irqctl); pp->irqresponse = 0; } atomic_inc (&pp->irqc); wake_up_interruptible (&pp->irq_wait); } static int register_device (int minor, struct pp_struct *pp) { struct parport *port; struct pardevice * pdev = NULL; char *name; int fl; name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); if (name == NULL) return -ENOMEM; port = parport_find_number (minor); if (!port) { printk (KERN_WARNING "%s: no associated port!\n", name); kfree (name); return -ENXIO; } fl = (pp->flags & PP_EXCL) ? 
PARPORT_FLAG_EXCL : 0; pdev = parport_register_device (port, name, NULL, NULL, pp_irq, fl, pp); parport_put_port (port); if (!pdev) { printk (KERN_WARNING "%s: failed to register device!\n", name); kfree (name); return -ENXIO; } pp->pdev = pdev; pr_debug("%s: registered pardevice\n", name); return 0; } static enum ieee1284_phase init_phase (int mode) { switch (mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR)) { case IEEE1284_MODE_NIBBLE: case IEEE1284_MODE_BYTE: return IEEE1284_PH_REV_IDLE; } return IEEE1284_PH_FWD_IDLE; } static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct pp_struct *pp = file->private_data; struct parport * port; void __user *argp = (void __user *)arg; /* First handle the cases that don't take arguments. */ switch (cmd) { case PPCLAIM: { struct ieee1284_info *info; int ret; if (pp->flags & PP_CLAIMED) { pr_debug(CHRDEV "%x: you've already got it!\n", minor); return -EINVAL; } /* Deferred device registration. */ if (!pp->pdev) { int err = register_device (minor, pp); if (err) { return err; } } ret = parport_claim_or_block (pp->pdev); if (ret < 0) return ret; pp->flags |= PP_CLAIMED; /* For interrupt-reporting to work, we need to be * informed of each interrupt. */ pp_enable_irq (pp); /* We may need to fix up the state machine. */ info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; pp->default_inactivity = parport_set_timeout (pp->pdev, 0); parport_set_timeout (pp->pdev, pp->default_inactivity); return 0; } case PPEXCL: if (pp->pdev) { pr_debug(CHRDEV "%x: too late for PPEXCL; " "already registered\n", minor); if (pp->flags & PP_EXCL) /* But it's not really an error. */ return 0; /* There's no chance of making the driver happy. */ return -EINVAL; } /* Just remember to register the device exclusively * when we finally do the registration. 
*/ pp->flags |= PP_EXCL; return 0; case PPSETMODE: { int mode; if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; /* FIXME: validate mode */ pp->state.mode = mode; pp->state.phase = init_phase (mode); if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.mode = mode; pp->pdev->port->ieee1284.phase = pp->state.phase; } return 0; } case PPGETMODE: { int mode; if (pp->flags & PP_CLAIMED) { mode = pp->pdev->port->ieee1284.mode; } else { mode = pp->state.mode; } if (copy_to_user (argp, &mode, sizeof (mode))) { return -EFAULT; } return 0; } case PPSETPHASE: { int phase; if (copy_from_user (&phase, argp, sizeof (phase))) { return -EFAULT; } /* FIXME: validate phase */ pp->state.phase = phase; if (pp->flags & PP_CLAIMED) { pp->pdev->port->ieee1284.phase = phase; } return 0; } case PPGETPHASE: { int phase; if (pp->flags & PP_CLAIMED) { phase = pp->pdev->port->ieee1284.phase; } else { phase = pp->state.phase; } if (copy_to_user (argp, &phase, sizeof (phase))) { return -EFAULT; } return 0; } case PPGETMODES: { unsigned int modes; port = parport_find_number (minor); if (!port) return -ENODEV; modes = port->modes; parport_put_port(port); if (copy_to_user (argp, &modes, sizeof (modes))) { return -EFAULT; } return 0; } case PPSETFLAGS: { int uflags; if (copy_from_user (&uflags, argp, sizeof (uflags))) { return -EFAULT; } pp->flags &= ~PP_FLAGMASK; pp->flags |= (uflags & PP_FLAGMASK); return 0; } case PPGETFLAGS: { int uflags; uflags = pp->flags & PP_FLAGMASK; if (copy_to_user (argp, &uflags, sizeof (uflags))) { return -EFAULT; } return 0; } } /* end switch() */ /* Everything else requires the port to be claimed, so check * that now. 
*/ if ((pp->flags & PP_CLAIMED) == 0) { pr_debug(CHRDEV "%x: claim the port first\n", minor); return -EINVAL; } port = pp->pdev->port; switch (cmd) { struct ieee1284_info *info; unsigned char reg; unsigned char mask; int mode; int ret; struct timeval par_timeout; long to_jiffies; case PPRSTATUS: reg = parport_read_status (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRDATA: reg = parport_read_data (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPRCONTROL: reg = parport_read_control (port); if (copy_to_user (argp, &reg, sizeof (reg))) return -EFAULT; return 0; case PPYIELD: parport_yield_blocking (pp->pdev); return 0; case PPRELEASE: /* Save the state machine's state. */ info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); pp->flags &= ~PP_CLAIMED; return 0; case PPWCONTROL: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_control (port, reg); return 0; case PPWDATA: if (copy_from_user (&reg, argp, sizeof (reg))) return -EFAULT; parport_write_data (port, reg); return 0; case PPFCONTROL: if (copy_from_user (&mask, argp, sizeof (mask))) return -EFAULT; if (copy_from_user (&reg, 1 + (unsigned char __user *) arg, sizeof (reg))) return -EFAULT; parport_frob_control (port, mask, reg); return 0; case PPDATADIR: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; if (mode) port->ops->data_reverse (port); else port->ops->data_forward (port); return 0; case PPNEGOT: if (copy_from_user (&mode, argp, sizeof (mode))) return -EFAULT; switch ((ret = parport_negotiate (port, mode))) { case 0: break; case -1: /* handshake failed, peripheral not IEEE 1284 */ ret = -EIO; break; case 1: /* handshake succeeded, peripheral rejected mode */ ret = -ENXIO; break; } pp_enable_irq (pp); return ret; case PPWCTLONIRQ: if (copy_from_user 
(&reg, argp, sizeof (reg))) return -EFAULT; /* Remember what to set the control lines to, for next * time we get an interrupt. */ pp->irqctl = reg; pp->irqresponse = 1; return 0; case PPCLRIRQ: ret = atomic_read (&pp->irqc); if (copy_to_user (argp, &ret, sizeof (ret))) return -EFAULT; atomic_sub (ret, &pp->irqc); return 0; case PPSETTIME: if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) { return -EFAULT; } /* Convert to jiffies, place in pp->pdev->timeout */ if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) { return -EINVAL; } to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ); to_jiffies += par_timeout.tv_sec * (long)HZ; if (to_jiffies <= 0) { return -EINVAL; } pp->pdev->timeout = to_jiffies; return 0; case PPGETTIME: to_jiffies = pp->pdev->timeout; memset(&par_timeout, 0, sizeof(par_timeout)); par_timeout.tv_sec = to_jiffies / HZ; par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ); if (copy_to_user (argp, &par_timeout, sizeof(struct timeval))) return -EFAULT; return 0; default: pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd); return -EINVAL; } /* Keep the compiler happy */ return 0; } static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&pp_do_mutex); ret = pp_do_ioctl(file, cmd, arg); mutex_unlock(&pp_do_mutex); return ret; } static int pp_open (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp; if (minor >= PARPORT_MAX) return -ENXIO; pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL); if (!pp) return -ENOMEM; pp->state.mode = IEEE1284_MODE_COMPAT; pp->state.phase = init_phase (pp->state.mode); pp->flags = 0; pp->irqresponse = 0; atomic_set (&pp->irqc, 0); init_waitqueue_head (&pp->irq_wait); /* Defer the actual device registration until the first claim. * That way, we know whether or not the driver wants to have * exclusive access to the port (PPEXCL). 
*/ pp->pdev = NULL; file->private_data = pp; return 0; } static int pp_release (struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct pp_struct *pp = file->private_data; int compat_negot; compat_negot = 0; if (!(pp->flags & PP_CLAIMED) && pp->pdev && (pp->state.mode != IEEE1284_MODE_COMPAT)) { struct ieee1284_info *info; /* parport released, but not in compatibility mode */ parport_claim_or_block (pp->pdev); pp->flags |= PP_CLAIMED; info = &pp->pdev->port->ieee1284; pp->saved_state.mode = info->mode; pp->saved_state.phase = info->phase; info->mode = pp->state.mode; info->phase = pp->state.phase; compat_negot = 1; } else if ((pp->flags & PP_CLAIMED) && pp->pdev && (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) { compat_negot = 2; } if (compat_negot) { parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT); pr_debug(CHRDEV "%x: negotiated back to compatibility " "mode because user-space forgot\n", minor); } if (pp->flags & PP_CLAIMED) { struct ieee1284_info *info; info = &pp->pdev->port->ieee1284; pp->state.mode = info->mode; pp->state.phase = info->phase; info->mode = pp->saved_state.mode; info->phase = pp->saved_state.phase; parport_release (pp->pdev); if (compat_negot != 1) { pr_debug(CHRDEV "%x: released pardevice " "because user-space forgot\n", minor); } } if (pp->pdev) { const char *name = pp->pdev->name; parport_unregister_device (pp->pdev); kfree (name); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } kfree (pp); return 0; } /* No kernel lock held - fine */ static unsigned int pp_poll (struct file * file, poll_table * wait) { struct pp_struct *pp = file->private_data; unsigned int mask = 0; poll_wait (file, &pp->irq_wait, wait); if (atomic_read (&pp->irqc)) mask |= POLLIN | POLLRDNORM; return mask; } static struct class *ppdev_class; static const struct file_operations pp_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = pp_read, .write = pp_write, .poll = pp_poll, 
.unlocked_ioctl = pp_ioctl, .open = pp_open, .release = pp_release, }; static void pp_attach(struct parport *port) { device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number), NULL, "parport%d", port->number); } static void pp_detach(struct parport *port) { device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number)); } static struct parport_driver pp_driver = { .name = CHRDEV, .attach = pp_attach, .detach = pp_detach, }; static int __init ppdev_init (void) { int err = 0; if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) { printk (KERN_WARNING CHRDEV ": unable to get major %d\n", PP_MAJOR); return -EIO; } ppdev_class = class_create(THIS_MODULE, CHRDEV); if (IS_ERR(ppdev_class)) { err = PTR_ERR(ppdev_class); goto out_chrdev; } if (parport_register_driver(&pp_driver)) { printk (KERN_WARNING CHRDEV ": unable to register with parport\n"); goto out_class; } printk (KERN_INFO PP_VERSION "\n"); goto out; out_class: class_destroy(ppdev_class); out_chrdev: unregister_chrdev(PP_MAJOR, CHRDEV); out: return err; } static void __exit ppdev_cleanup (void) { /* Clean up all parport stuff */ parport_unregister_driver(&pp_driver); class_destroy(ppdev_class); unregister_chrdev (PP_MAJOR, CHRDEV); } module_init(ppdev_init); module_exit(ppdev_cleanup); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(PP_MAJOR);
gpl-2.0
loxdegio/GT_S7500_LoxKernel_trebon
GT-S7500_Kernel/drivers/tty/serial/8250_early.c
8453
7218
/* * Early serial console for 8250/16550 devices * * (c) Copyright 2004 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Based on the 8250.c serial driver, Copyright (C) 2001 Russell King, * and on early_printk.c by Andi Kleen. * * This is for use before the serial driver has initialized, in * particular, before the UARTs have been discovered and named. * Instead of specifying the console device as, e.g., "ttyS0", * we locate the device directly by its MMIO or I/O port address. * * The user can specify the device directly, e.g., * earlycon=uart8250,io,0x3f8,9600n8 * earlycon=uart8250,mmio,0xff5e0000,115200n8 * earlycon=uart8250,mmio32,0xff5e0000,115200n8 * or * console=uart8250,io,0x3f8,9600n8 * console=uart8250,mmio,0xff5e0000,115200n8 * console=uart8250,mmio32,0xff5e0000,115200n8 */ #include <linux/tty.h> #include <linux/init.h> #include <linux/console.h> #include <linux/serial_core.h> #include <linux/serial_reg.h> #include <linux/serial.h> #include <linux/serial_8250.h> #include <asm/io.h> #include <asm/serial.h> #ifdef CONFIG_FIX_EARLYCON_MEM #include <asm/pgtable.h> #include <asm/fixmap.h> #endif struct early_serial8250_device { struct uart_port port; char options[16]; /* e.g., 115200n8 */ unsigned int baud; }; static struct early_serial8250_device early_device; static unsigned int __init serial_in(struct uart_port *port, int offset) { switch (port->iotype) { case UPIO_MEM: return readb(port->membase + offset); case UPIO_MEM32: return readl(port->membase + (offset << 2)); case UPIO_PORT: return inb(port->iobase + offset); default: return 0; } } static void __init serial_out(struct uart_port *port, int offset, int value) { switch (port->iotype) { case UPIO_MEM: writeb(value, port->membase + offset); break; case UPIO_MEM32: writel(value, 
port->membase + (offset << 2)); break; case UPIO_PORT: outb(value, port->iobase + offset); break; } } #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) static void __init wait_for_xmitr(struct uart_port *port) { unsigned int status; for (;;) { status = serial_in(port, UART_LSR); if ((status & BOTH_EMPTY) == BOTH_EMPTY) return; cpu_relax(); } } static void __init serial_putc(struct uart_port *port, int c) { wait_for_xmitr(port); serial_out(port, UART_TX, c); } static void __init early_serial8250_write(struct console *console, const char *s, unsigned int count) { struct uart_port *port = &early_device.port; unsigned int ier; /* Save the IER and disable interrupts */ ier = serial_in(port, UART_IER); serial_out(port, UART_IER, 0); uart_console_write(port, s, count, serial_putc); /* Wait for transmitter to become empty and restore the IER */ wait_for_xmitr(port); serial_out(port, UART_IER, ier); } static unsigned int __init probe_baud(struct uart_port *port) { unsigned char lcr, dll, dlm; unsigned int quot; lcr = serial_in(port, UART_LCR); serial_out(port, UART_LCR, lcr | UART_LCR_DLAB); dll = serial_in(port, UART_DLL); dlm = serial_in(port, UART_DLM); serial_out(port, UART_LCR, lcr); quot = (dlm << 8) | dll; return (port->uartclk / 16) / quot; } static void __init init_port(struct early_serial8250_device *device) { struct uart_port *port = &device->port; unsigned int divisor; unsigned char c; serial_out(port, UART_LCR, 0x3); /* 8n1 */ serial_out(port, UART_IER, 0); /* no interrupt */ serial_out(port, UART_FCR, 0); /* no fifo */ serial_out(port, UART_MCR, 0x3); /* DTR + RTS */ divisor = port->uartclk / (16 * device->baud); c = serial_in(port, UART_LCR); serial_out(port, UART_LCR, c | UART_LCR_DLAB); serial_out(port, UART_DLL, divisor & 0xff); serial_out(port, UART_DLM, (divisor >> 8) & 0xff); serial_out(port, UART_LCR, c & ~UART_LCR_DLAB); } static int __init parse_options(struct early_serial8250_device *device, char *options) { struct uart_port *port = &device->port; 
int mmio, mmio32, length; if (!options) return -ENODEV; port->uartclk = BASE_BAUD * 16; mmio = !strncmp(options, "mmio,", 5); mmio32 = !strncmp(options, "mmio32,", 7); if (mmio || mmio32) { port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32); port->mapbase = simple_strtoul(options + (mmio ? 5 : 7), &options, 0); if (mmio32) port->regshift = 2; #ifdef CONFIG_FIX_EARLYCON_MEM set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, port->mapbase & PAGE_MASK); port->membase = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE); port->membase += port->mapbase & ~PAGE_MASK; #else port->membase = ioremap_nocache(port->mapbase, 64); if (!port->membase) { printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n", __func__, (unsigned long long) port->mapbase); return -ENOMEM; } #endif } else if (!strncmp(options, "io,", 3)) { port->iotype = UPIO_PORT; port->iobase = simple_strtoul(options + 3, &options, 0); mmio = 0; } else return -EINVAL; options = strchr(options, ','); if (options) { options++; device->baud = simple_strtoul(options, NULL, 0); length = min(strcspn(options, " "), sizeof(device->options)); strncpy(device->options, options, length); } else { device->baud = probe_baud(port); snprintf(device->options, sizeof(device->options), "%u", device->baud); } if (mmio || mmio32) printk(KERN_INFO "Early serial console at MMIO%s 0x%llx (options '%s')\n", mmio32 ? 
"32" : "", (unsigned long long)port->mapbase, device->options); else printk(KERN_INFO "Early serial console at I/O port 0x%lx (options '%s')\n", port->iobase, device->options); return 0; } static struct console early_serial8250_console __initdata = { .name = "uart", .write = early_serial8250_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1, }; static int __init early_serial8250_setup(char *options) { struct early_serial8250_device *device = &early_device; int err; if (device->port.membase || device->port.iobase) return 0; err = parse_options(device, options); if (err < 0) return err; init_port(device); return 0; } int __init setup_early_serial8250_console(char *cmdline) { char *options; int err; options = strstr(cmdline, "uart8250,"); if (!options) { options = strstr(cmdline, "uart,"); if (!options) return 0; } options = strchr(cmdline, ',') + 1; err = early_serial8250_setup(options); if (err < 0) return err; register_console(&early_serial8250_console); return 0; } int serial8250_find_port_for_earlycon(void) { struct early_serial8250_device *device = &early_device; struct uart_port *port = &device->port; int line; int ret; if (!device->port.membase && !device->port.iobase) return -ENODEV; line = serial8250_find_port(port); if (line < 0) return -ENODEV; ret = update_console_cmdline("uart", 8250, "ttyS", line, device->options); if (ret < 0) ret = update_console_cmdline("uart", 0, "ttyS", line, device->options); return ret; } early_param("earlycon", setup_early_serial8250_console);
gpl-2.0
jcadduono/nethunter_kernel_klte
sound/pci/ice1712/vt1720_mobo.c
9221
3936
/* * ALSA driver for VT1720/VT1724 (Envy24PT/Envy24HT) * * Lowlevel functions for VT1720-based motherboards * * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <sound/core.h> #include "ice1712.h" #include "envy24ht.h" #include "vt1720_mobo.h" static int __devinit k8x800_init(struct snd_ice1712 *ice) { ice->vt1720 = 1; /* VT1616 codec */ ice->num_total_dacs = 6; ice->num_total_adcs = 2; /* WM8728 codec */ /* FIXME: TODO */ return 0; } static int __devinit k8x800_add_controls(struct snd_ice1712 *ice) { /* FIXME: needs some quirks for VT1616? 
*/ return 0; } /* EEPROM image */ static unsigned char k8x800_eeprom[] __devinitdata = { [ICE_EEP2_SYSCONF] = 0x01, /* clock 256, 1ADC, 2DACs */ [ICE_EEP2_ACLINK] = 0x02, /* ACLINK, packed */ [ICE_EEP2_I2S] = 0x00, /* - */ [ICE_EEP2_SPDIF] = 0x00, /* - */ [ICE_EEP2_GPIO_DIR] = 0xff, [ICE_EEP2_GPIO_DIR1] = 0xff, [ICE_EEP2_GPIO_DIR2] = 0x00, /* - */ [ICE_EEP2_GPIO_MASK] = 0xff, [ICE_EEP2_GPIO_MASK1] = 0xff, [ICE_EEP2_GPIO_MASK2] = 0x00, /* - */ [ICE_EEP2_GPIO_STATE] = 0x00, [ICE_EEP2_GPIO_STATE1] = 0x00, [ICE_EEP2_GPIO_STATE2] = 0x00, /* - */ }; static unsigned char sn25p_eeprom[] __devinitdata = { [ICE_EEP2_SYSCONF] = 0x01, /* clock 256, 1ADC, 2DACs */ [ICE_EEP2_ACLINK] = 0x02, /* ACLINK, packed */ [ICE_EEP2_I2S] = 0x00, /* - */ [ICE_EEP2_SPDIF] = 0x41, /* - */ [ICE_EEP2_GPIO_DIR] = 0xff, [ICE_EEP2_GPIO_DIR1] = 0xff, [ICE_EEP2_GPIO_DIR2] = 0x00, /* - */ [ICE_EEP2_GPIO_MASK] = 0xff, [ICE_EEP2_GPIO_MASK1] = 0xff, [ICE_EEP2_GPIO_MASK2] = 0x00, /* - */ [ICE_EEP2_GPIO_STATE] = 0x00, [ICE_EEP2_GPIO_STATE1] = 0x00, [ICE_EEP2_GPIO_STATE2] = 0x00, /* - */ }; /* entry point */ struct snd_ice1712_card_info snd_vt1720_mobo_cards[] __devinitdata = { { .subvendor = VT1720_SUBDEVICE_K8X800, .name = "Albatron K8X800 Pro II", .model = "k8x800", .chip_init = k8x800_init, .build_controls = k8x800_add_controls, .eeprom_size = sizeof(k8x800_eeprom), .eeprom_data = k8x800_eeprom, }, { .subvendor = VT1720_SUBDEVICE_ZNF3_150, .name = "Chaintech ZNF3-150", /* identical with k8x800 */ .chip_init = k8x800_init, .build_controls = k8x800_add_controls, .eeprom_size = sizeof(k8x800_eeprom), .eeprom_data = k8x800_eeprom, }, { .subvendor = VT1720_SUBDEVICE_ZNF3_250, .name = "Chaintech ZNF3-250", /* identical with k8x800 */ .chip_init = k8x800_init, .build_controls = k8x800_add_controls, .eeprom_size = sizeof(k8x800_eeprom), .eeprom_data = k8x800_eeprom, }, { .subvendor = VT1720_SUBDEVICE_9CJS, .name = "Chaintech 9CJS", /* identical with k8x800 */ .chip_init = k8x800_init, .build_controls = 
k8x800_add_controls, .eeprom_size = sizeof(k8x800_eeprom), .eeprom_data = k8x800_eeprom, }, { .subvendor = VT1720_SUBDEVICE_SN25P, .name = "Shuttle SN25P", .model = "sn25p", .chip_init = k8x800_init, .build_controls = k8x800_add_controls, .eeprom_size = sizeof(k8x800_eeprom), .eeprom_data = sn25p_eeprom, }, { } /* terminator */ };
gpl-2.0
htc-mirror/evita-ics-crc-3.0.8-271616b
arch/unicore32/mm/pgd.c
10757
2145
/* * linux/arch/unicore32/mm/pgd.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/mm.h> #include <linux/gfp.h> #include <linux/highmem.h> #include <asm/pgalloc.h> #include <asm/page.h> #include <asm/tlbflush.h> #include "mm.h" #define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD) /* * need to get a 4k page for level 1 */ pgd_t *get_pgd_slow(struct mm_struct *mm) { pgd_t *new_pgd, *init_pgd; pmd_t *new_pmd, *init_pmd; pte_t *new_pte, *init_pte; new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0); if (!new_pgd) goto no_pgd; memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t)); /* * Copy over the kernel and IO PGD entries */ init_pgd = pgd_offset_k(0); memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR, (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t)); clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); if (!vectors_high()) { /* * On UniCore, first page must always be allocated since it * contains the machine vectors. 
*/ new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0); if (!new_pmd) goto no_pmd; new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); if (!new_pte) goto no_pte; init_pmd = pmd_offset((pud_t *)init_pgd, 0); init_pte = pte_offset_map(init_pmd, 0); set_pte(new_pte, *init_pte); pte_unmap(init_pte); pte_unmap(new_pte); } return new_pgd; no_pte: pmd_free(mm, new_pmd); no_pmd: free_pages((unsigned long)new_pgd, 0); no_pgd: return NULL; } void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd) { pmd_t *pmd; pgtable_t pte; if (!pgd) return; /* pgd is always present and good */ pmd = pmd_off(pgd, 0); if (pmd_none(*pmd)) goto free; if (pmd_bad(*pmd)) { pmd_ERROR(*pmd); pmd_clear(pmd); goto free; } pte = pmd_pgtable(*pmd); pmd_clear(pmd); pte_free(mm, pte); pmd_free(mm, pmd); free: free_pages((unsigned long) pgd, 0); }
gpl-2.0
ptmr3/i717_JB_Kernel
arch/x86/kernel/cpu/mtrr/cyrix.c
11269
5820
#include <linux/init.h> #include <linux/io.h> #include <linux/mm.h> #include <asm/processor-cyrix.h> #include <asm/processor-flags.h> #include <asm/mtrr.h> #include <asm/msr.h> #include "mtrr.h" static void cyrix_get_arr(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type * type) { unsigned char arr, ccr3, rcr, shift; unsigned long flags; arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ local_irq_save(flags); ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ ((unsigned char *)base)[3] = getCx86(arr); ((unsigned char *)base)[2] = getCx86(arr + 1); ((unsigned char *)base)[1] = getCx86(arr + 2); rcr = getCx86(CX86_RCR_BASE + reg); setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ local_irq_restore(flags); shift = ((unsigned char *) base)[1] & 0x0f; *base >>= PAGE_SHIFT; /* * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 * Note: shift==0xf means 4G, this is unsupported. */ if (shift) *size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1); else *size = 0; /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */ if (reg < 7) { switch (rcr) { case 1: *type = MTRR_TYPE_UNCACHABLE; break; case 8: *type = MTRR_TYPE_WRBACK; break; case 9: *type = MTRR_TYPE_WRCOMB; break; case 24: default: *type = MTRR_TYPE_WRTHROUGH; break; } } else { switch (rcr) { case 0: *type = MTRR_TYPE_UNCACHABLE; break; case 8: *type = MTRR_TYPE_WRCOMB; break; case 9: *type = MTRR_TYPE_WRBACK; break; case 25: default: *type = MTRR_TYPE_WRTHROUGH; break; } } } /* * cyrix_get_free_region - get a free ARR. * * @base: the starting (base) address of the region. * @size: the size (in bytes) of the region. * * Returns: the index of the region on success, else -1 on error. 
*/ static int cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) { unsigned long lbase, lsize; mtrr_type ltype; int i; switch (replace_reg) { case 7: if (size < 0x40) break; case 6: case 5: case 4: return replace_reg; case 3: case 2: case 1: case 0: return replace_reg; } /* If we are to set up a region >32M then look at ARR7 immediately */ if (size > 0x2000) { cyrix_get_arr(7, &lbase, &lsize, &ltype); if (lsize == 0) return 7; /* Else try ARR0-ARR6 first */ } else { for (i = 0; i < 7; i++) { cyrix_get_arr(i, &lbase, &lsize, &ltype); if (lsize == 0) return i; } /* * ARR0-ARR6 isn't free * try ARR7 but its size must be at least 256K */ cyrix_get_arr(i, &lbase, &lsize, &ltype); if ((lsize == 0) && (size >= 0x40)) return i; } return -ENOSPC; } static u32 cr4, ccr3; static void prepare_set(void) { u32 cr0; /* Save value of CR4 and clear Page Global Enable (bit 7) */ if (cpu_has_pge) { cr4 = read_cr4(); write_cr4(cr4 & ~X86_CR4_PGE); } /* * Disable and flush caches. 
* Note that wbinvd flushes the TLBs as a side-effect */ cr0 = read_cr0() | X86_CR0_CD; wbinvd(); write_cr0(cr0); wbinvd(); /* Cyrix ARRs - everything else was excluded at the top */ ccr3 = getCx86(CX86_CCR3); /* Cyrix ARRs - everything else was excluded at the top */ setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); } static void post_set(void) { /* Flush caches and TLBs */ wbinvd(); /* Cyrix ARRs - everything else was excluded at the top */ setCx86(CX86_CCR3, ccr3); /* Enable caches */ write_cr0(read_cr0() & 0xbfffffff); /* Restore value of CR4 */ if (cpu_has_pge) write_cr4(cr4); } static void cyrix_set_arr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type) { unsigned char arr, arr_type, arr_size; arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */ if (reg >= 7) size >>= 6; size &= 0x7fff; /* make sure arr_size <= 14 */ for (arr_size = 0; size; arr_size++, size >>= 1) ; if (reg < 7) { switch (type) { case MTRR_TYPE_UNCACHABLE: arr_type = 1; break; case MTRR_TYPE_WRCOMB: arr_type = 9; break; case MTRR_TYPE_WRTHROUGH: arr_type = 24; break; default: arr_type = 8; break; } } else { switch (type) { case MTRR_TYPE_UNCACHABLE: arr_type = 0; break; case MTRR_TYPE_WRCOMB: arr_type = 8; break; case MTRR_TYPE_WRTHROUGH: arr_type = 25; break; default: arr_type = 9; break; } } prepare_set(); base <<= PAGE_SHIFT; setCx86(arr + 0, ((unsigned char *)&base)[3]); setCx86(arr + 1, ((unsigned char *)&base)[2]); setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size); setCx86(CX86_RCR_BASE + reg, arr_type); post_set(); } typedef struct { unsigned long base; unsigned long size; mtrr_type type; } arr_state_t; static arr_state_t arr_state[8] = { {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL} }; static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 }; static void cyrix_set_all(void) { int i; prepare_set(); 
/* the CCRs are not contiguous */ for (i = 0; i < 4; i++) setCx86(CX86_CCR0 + i, ccr_state[i]); for (; i < 7; i++) setCx86(CX86_CCR4 + i, ccr_state[i]); for (i = 0; i < 8; i++) { cyrix_set_arr(i, arr_state[i].base, arr_state[i].size, arr_state[i].type); } post_set(); } static const struct mtrr_ops cyrix_mtrr_ops = { .vendor = X86_VENDOR_CYRIX, .set_all = cyrix_set_all, .set = cyrix_set_arr, .get = cyrix_get_arr, .get_free_region = cyrix_get_free_region, .validate_add_page = generic_validate_add_page, .have_wrcomb = positive_have_wrcomb, }; int __init cyrix_init_mtrr(void) { set_mtrr_ops(&cyrix_mtrr_ops); return 0; }
gpl-2.0
schqiushui/HTL21-KitKat-3.4.X
arch/powerpc/boot/cuboot-kilauea.c
13829
1275
/* * Old U-boot compatibility for PPC405EX. This image is already included * a dtb. * * Author: Tiejun Chen <tiejun.chen@windriver.com> * * Copyright (C) 2009 Wind River Systems, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "ops.h" #include "io.h" #include "dcr.h" #include "stdio.h" #include "4xx.h" #include "44x.h" #include "cuboot.h" #define TARGET_4xx #define TARGET_44x #include "ppcboot.h" #define KILAUEA_SYS_EXT_SERIAL_CLOCK 11059200 /* ext. 11.059MHz clk */ static bd_t bd; static void kilauea_fixups(void) { unsigned long sysclk = 33333333; ibm405ex_fixup_clocks(sysclk, KILAUEA_SYS_EXT_SERIAL_CLOCK); dt_fixup_memory(bd.bi_memstart, bd.bi_memsize); ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); platform_ops.fixups = kilauea_fixups; platform_ops.exit = ibm40x_dbcr_reset; fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
tgnice/kvm
drivers/char/xilinx_hwicap/fifo_icap.c
15109
11906
/***************************************************************************** * * Author: Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. * * (c) Copyright 2007-2008 Xilinx Inc. * All rights reserved. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ #include "fifo_icap.h" /* Register offsets for the XHwIcap device. 
*/ #define XHI_GIER_OFFSET 0x1C /* Device Global Interrupt Enable Reg */ #define XHI_IPISR_OFFSET 0x20 /* Interrupt Status Register */ #define XHI_IPIER_OFFSET 0x28 /* Interrupt Enable Register */ #define XHI_WF_OFFSET 0x100 /* Write FIFO */ #define XHI_RF_OFFSET 0x104 /* Read FIFO */ #define XHI_SZ_OFFSET 0x108 /* Size Register */ #define XHI_CR_OFFSET 0x10C /* Control Register */ #define XHI_SR_OFFSET 0x110 /* Status Register */ #define XHI_WFV_OFFSET 0x114 /* Write FIFO Vacancy Register */ #define XHI_RFO_OFFSET 0x118 /* Read FIFO Occupancy Register */ /* Device Global Interrupt Enable Register (GIER) bit definitions */ #define XHI_GIER_GIE_MASK 0x80000000 /* Global Interrupt enable Mask */ /** * HwIcap Device Interrupt Status/Enable Registers * * Interrupt Status Register (IPISR) : This register holds the * interrupt status flags for the device. These bits are toggle on * write. * * Interrupt Enable Register (IPIER) : This register is used to enable * interrupt sources for the device. * Writing a '1' to a bit enables the corresponding interrupt. * Writing a '0' to a bit disables the corresponding interrupt. * * IPISR/IPIER registers have the same bit definitions and are only defined * once. 
*/ #define XHI_IPIXR_RFULL_MASK 0x00000008 /* Read FIFO Full */ #define XHI_IPIXR_WEMPTY_MASK 0x00000004 /* Write FIFO Empty */ #define XHI_IPIXR_RDP_MASK 0x00000002 /* Read FIFO half full */ #define XHI_IPIXR_WRP_MASK 0x00000001 /* Write FIFO half full */ #define XHI_IPIXR_ALL_MASK 0x0000000F /* Mask of all interrupts */ /* Control Register (CR) */ #define XHI_CR_SW_RESET_MASK 0x00000008 /* SW Reset Mask */ #define XHI_CR_FIFO_CLR_MASK 0x00000004 /* FIFO Clear Mask */ #define XHI_CR_READ_MASK 0x00000002 /* Read from ICAP to FIFO */ #define XHI_CR_WRITE_MASK 0x00000001 /* Write from FIFO to ICAP */ #define XHI_WFO_MAX_VACANCY 1024 /* Max Write FIFO Vacancy, in words */ #define XHI_RFO_MAX_OCCUPANCY 256 /* Max Read FIFO Occupancy, in words */ /* The maximum amount we can request from fifo_icap_get_configuration at once, in bytes. */ #define XHI_MAX_READ_TRANSACTION_WORDS 0xFFF /** * fifo_icap_fifo_write - Write data to the write FIFO. * @drvdata: a pointer to the drvdata. * @data: the 32-bit value to be written to the FIFO. * * This function will silently fail if the fifo is full. **/ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata, u32 data) { dev_dbg(drvdata->dev, "fifo_write: %x\n", data); out_be32(drvdata->base_address + XHI_WF_OFFSET, data); } /** * fifo_icap_fifo_read - Read data from the Read FIFO. * @drvdata: a pointer to the drvdata. * * This function will silently fail if the fifo is empty. **/ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata) { u32 data = in_be32(drvdata->base_address + XHI_RF_OFFSET); dev_dbg(drvdata->dev, "fifo_read: %x\n", data); return data; } /** * fifo_icap_set_read_size - Set the the size register. * @drvdata: a pointer to the drvdata. * @data: the size of the following read transaction, in words. 
**/ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, u32 data) { out_be32(drvdata->base_address + XHI_SZ_OFFSET, data); } /** * fifo_icap_start_config - Initiate a configuration (write) to the device. * @drvdata: a pointer to the drvdata. **/ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) { out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_WRITE_MASK); dev_dbg(drvdata->dev, "configuration started\n"); } /** * fifo_icap_start_readback - Initiate a readback from the device. * @drvdata: a pointer to the drvdata. **/ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) { out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_READ_MASK); dev_dbg(drvdata->dev, "readback started\n"); } /** * fifo_icap_get_status - Get the contents of the status register. * @drvdata: a pointer to the drvdata. * * The status register contains the ICAP status and the done bit. * * D8 - cfgerr * D7 - dalign * D6 - rip * D5 - in_abort_l * D4 - Always 1 * D3 - Always 1 * D2 - Always 1 * D1 - Always 1 * D0 - Done bit **/ u32 fifo_icap_get_status(struct hwicap_drvdata *drvdata) { u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET); dev_dbg(drvdata->dev, "Getting status = %x\n", status); return status; } /** * fifo_icap_busy - Return true if the ICAP is still processing a transaction. * @drvdata: a pointer to the drvdata. **/ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) { u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET); return (status & XHI_SR_DONE_MASK) ? 0 : 1; } /** * fifo_icap_write_fifo_vacancy - Query the write fifo available space. * @drvdata: a pointer to the drvdata. * * Return the number of words that can be safely pushed into the write fifo. **/ static inline u32 fifo_icap_write_fifo_vacancy( struct hwicap_drvdata *drvdata) { return in_be32(drvdata->base_address + XHI_WFV_OFFSET); } /** * fifo_icap_read_fifo_occupancy - Query the read fifo available data. 
* @drvdata: a pointer to the drvdata. * * Return the number of words that can be safely read from the read fifo. **/ static inline u32 fifo_icap_read_fifo_occupancy( struct hwicap_drvdata *drvdata) { return in_be32(drvdata->base_address + XHI_RFO_OFFSET); } /** * fifo_icap_set_configuration - Send configuration data to the ICAP. * @drvdata: a pointer to the drvdata. * @frame_buffer: a pointer to the data to be written to the * ICAP device. * @num_words: the number of words (32 bit) to write to the ICAP * device. * This function writes the given user data to the Write FIFO in * polled mode and starts the transfer of the data to * the ICAP device. **/ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *frame_buffer, u32 num_words) { u32 write_fifo_vacancy = 0; u32 retries = 0; u32 remaining_words; dev_dbg(drvdata->dev, "fifo_set_configuration\n"); /* * Check if the ICAP device is Busy with the last Read/Write */ if (fifo_icap_busy(drvdata)) return -EBUSY; /* * Set up the buffer pointer and the words to be transferred. */ remaining_words = num_words; while (remaining_words > 0) { /* * Wait until we have some data in the fifo. */ while (write_fifo_vacancy == 0) { write_fifo_vacancy = fifo_icap_write_fifo_vacancy(drvdata); retries++; if (retries > XHI_MAX_RETRIES) return -EIO; } /* * Write data into the Write FIFO. */ while ((write_fifo_vacancy != 0) && (remaining_words > 0)) { fifo_icap_fifo_write(drvdata, *frame_buffer); remaining_words--; write_fifo_vacancy--; frame_buffer++; } /* Start pushing whatever is in the FIFO into the ICAP. */ fifo_icap_start_config(drvdata); } /* Wait until the write has finished. */ while (fifo_icap_busy(drvdata)) { retries++; if (retries > XHI_MAX_RETRIES) break; } dev_dbg(drvdata->dev, "done fifo_set_configuration\n"); /* * If the requested number of words have not been read from * the device then indicate failure. 
*/ if (remaining_words != 0) return -EIO; return 0; } /** * fifo_icap_get_configuration - Read configuration data from the device. * @drvdata: a pointer to the drvdata. * @data: Address of the data representing the partial bitstream * @size: the size of the partial bitstream in 32 bit words. * * This function reads the specified number of words from the ICAP device in * the polled mode. */ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *frame_buffer, u32 num_words) { u32 read_fifo_occupancy = 0; u32 retries = 0; u32 *data = frame_buffer; u32 remaining_words; u32 words_to_read; dev_dbg(drvdata->dev, "fifo_get_configuration\n"); /* * Check if the ICAP device is Busy with the last Write/Read */ if (fifo_icap_busy(drvdata)) return -EBUSY; remaining_words = num_words; while (remaining_words > 0) { words_to_read = remaining_words; /* The hardware has a limit on the number of words that can be read at one time. */ if (words_to_read > XHI_MAX_READ_TRANSACTION_WORDS) words_to_read = XHI_MAX_READ_TRANSACTION_WORDS; remaining_words -= words_to_read; fifo_icap_set_read_size(drvdata, words_to_read); fifo_icap_start_readback(drvdata); while (words_to_read > 0) { /* Wait until we have some data in the fifo. */ while (read_fifo_occupancy == 0) { read_fifo_occupancy = fifo_icap_read_fifo_occupancy(drvdata); retries++; if (retries > XHI_MAX_RETRIES) return -EIO; } if (read_fifo_occupancy > words_to_read) read_fifo_occupancy = words_to_read; words_to_read -= read_fifo_occupancy; /* Read the data from the Read FIFO. */ while (read_fifo_occupancy != 0) { *data++ = fifo_icap_fifo_read(drvdata); read_fifo_occupancy--; } } } dev_dbg(drvdata->dev, "done fifo_get_configuration\n"); return 0; } /** * buffer_icap_reset - Reset the logic of the icap device. * @drvdata: a pointer to the drvdata. * * This function forces the software reset of the complete HWICAP device. 
* All the registers will return to the default value and the FIFO is also * flushed as a part of this software reset. */ void fifo_icap_reset(struct hwicap_drvdata *drvdata) { u32 reg_data; /* * Reset the device by setting/clearing the RESET bit in the * Control Register. */ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET); out_be32(drvdata->base_address + XHI_CR_OFFSET, reg_data | XHI_CR_SW_RESET_MASK); out_be32(drvdata->base_address + XHI_CR_OFFSET, reg_data & (~XHI_CR_SW_RESET_MASK)); } /** * fifo_icap_flush_fifo - This function flushes the FIFOs in the device. * @drvdata: a pointer to the drvdata. */ void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) { u32 reg_data; /* * Flush the FIFO by setting/clearing the FIFO Clear bit in the * Control Register. */ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET); out_be32(drvdata->base_address + XHI_CR_OFFSET, reg_data | XHI_CR_FIFO_CLR_MASK); out_be32(drvdata->base_address + XHI_CR_OFFSET, reg_data & (~XHI_CR_FIFO_CLR_MASK)); }
gpl-2.0
geodynamics/gale
boost/libs/config/test/no_variadic_macros_pass.cpp
6
1093
// This file was automatically generated on Tue Aug 17 09:59:01 2010 // by libs/config/tools/generate.cpp // Copyright John Maddock 2002-4. // Use, modification and distribution are subject to the // Boost Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // See http://www.boost.org/libs/config for the most recent version.// // Revision $Id: generate.cpp 49281 2008-10-11 15:40:44Z johnmaddock $ // // Test file for macro BOOST_NO_VARIADIC_MACROS // This file should compile, if it does not then // BOOST_NO_VARIADIC_MACROS should be defined. // See file boost_no_variadic_macros.ipp for details // Must not have BOOST_ASSERT_CONFIG set; it defeats // the objective of this file: #ifdef BOOST_ASSERT_CONFIG # undef BOOST_ASSERT_CONFIG #endif #include <boost/config.hpp> #include "test.hpp" #ifndef BOOST_NO_VARIADIC_MACROS #include "boost_no_variadic_macros.ipp" #else namespace boost_no_variadic_macros = empty_boost; #endif int main( int, char *[] ) { return boost_no_variadic_macros::test(); }
gpl-2.0
vitek999/Lenovo-a328
drivers/misc/mediatek/m4u/mt6582/m4u_priv.c
6
168505
#include <linux/uaccess.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/platform_device.h> #include <linux/cdev.h> #include <linux/interrupt.h> #include <asm/io.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/earlysuspend.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/aee.h> #include <linux/timer.h> #include <linux/disp_assert_layer.h> #include <linux/xlog.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <asm/mach/map.h> #include <mach/sync_write.h> #include <mach/mt_irq.h> #include <mach/mt_clkmgr.h> #include <mach/irqs.h> //#include <mach/mt_boot.h> #include <asm/cacheflush.h> #include <asm/system.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/printk.h> #include <mach/m4u.h> #include <mach/mt_smi.h> #include "m4u_reg.h" #include "smi_common.h" #include <ddp_reg.h> #include <linux/m4u_profile.h> #include "m4u_priv.h" #define M4U_ASSERT(x) if(!(x)){xlog_printk(ANDROID_LOG_ERROR, "M4U", "assert fail, file:%s, line:%d", __FILE__, __LINE__);} //#define MTK_M4U_DBG #ifdef MTK_M4U_DBG #define M4UDBG(string, args...) xlog_printk(ANDROID_LOG_INFO, "M4U", "[pid=%d]"string,current->tgid,##args); bool gM4uLogFlag = false; #define M4ULOG(string, args...) xlog_printk(ANDROID_LOG_INFO, "M4U", "[pid=%d] "string,current->tgid,##args) #else #define M4UDBG(string, args...) bool gM4uLogFlag = false; #define M4ULOG(string, args...) do { \ if(gM4uLogFlag){ \ xlog_printk(ANDROID_LOG_INFO, "M4U", "[pid=%d] "string,current->tgid,##args); } \ }while(0) #endif #define M4UMSG(string, args...) xlog_printk(ANDROID_LOG_INFO, "M4U", string,##args) #define M4UINFO(string, args...) xlog_printk(ANDROID_LOG_DEBUG, "M4U", string,##args) //#define M4UINFO(string, args...) xlog_printk(ANDROID_LOG_INFO, "M4U", string,##args) //#define KERN_INFO #define M4UTMP(string, args...) 
xlog_printk(ANDROID_LOG_INFO, "M4U", string,##args) #define M4UERR(string, args...) do {\ xlog_printk(ANDROID_LOG_ERROR, "M4U", "error: "string,##args); \ aee_kernel_exception("M4U", "[M4U] error:"string,##args); \ }while(0) static char m4u_name[100]; #define m4u_aee_print(string, args...) do{\ snprintf(m4u_name,100, "[M4U]"string, ##args); \ aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_MMPROFILE_BUFFER, m4u_name, "[M4U] error:"string, ##args); \ }while(0) #define PFNMAP_FLAG_SET 0x00555555 #define M4U_USE_ONE_PAGETABLE #define M4U_COPY_NONSEC_PT_TO_SEC // garbage collect related #define MVA_REGION_FLAG_NONE 0x0 #define MVA_REGION_HAS_TLB_RANGE 0x1 #define MVA_REGION_REGISTER 0x2 // list element, each element record mva's size, start addr info // if user process dose not call mva_alloc() and mva_dealloc() in pair // we will help to call mva_dealloc() according to elements' info static mva_info_t gMvaNode_unkown = { .bufAddr = 0, .mvaStart = 0, .size = 0, .eModuleId = M4U_PORT_UNKNOWN, }; //------------------------------------Defines & Data for alloc mva------------- //---------------------------------------------------------------------- /// macros to handle M4u Page Table processing #define M4U_MVA_MAX 0x3fffffff // 1G #define M4U_PAGE_MASK 0xfff #define M4U_PAGE_SIZE 0x1000 //4KB #define DEFAULT_PAGE_SIZE 0x1000 //4KB #define M4U_PTE_MAX (M4U_GET_PTE_OFST_TO_PT_SA(TOTAL_MVA_RANGE-1)) #define mva_pteAddr_nonsec(mva) ((unsigned int *)pPT_nonsec+((mva) >> 12)) #define mva_pteAddr_sec(mva) ((unsigned int *)pPT_sec+((mva) >> 12)) #define mva_pteAddr(mva) mva_pteAddr_nonsec(mva) // ((va&0xfff)+size+0xfff)>>12 #define M4U_GET_PAGE_NUM(va,size) ((((unsigned int)(va)&(M4U_PAGE_SIZE-1))+(size)+(M4U_PAGE_SIZE-1))>>12) #define mva_pageOffset(mva) ((mva)&0xfff) #define MVA_BLOCK_SIZE_ORDER 18 //256K #define MVA_MAX_BLOCK_NR 4095 //1GB #define MVA_BLOCK_SIZE (1<<MVA_BLOCK_SIZE_ORDER) //0x40000 #define MVA_BLOCK_ALIGN_MASK (MVA_BLOCK_SIZE-1) //0x3ffff #define 
MVA_BLOCK_NR_MASK (MVA_MAX_BLOCK_NR) //0xfff
/* Each mvaGraph entry: bit15 = "busy" flag, low 12 bits = block count of the region. */
#define MVA_BUSY_MASK (1<<15) //0x8000
#define MVA_IS_BUSY(index) ((mvaGraph[index]&MVA_BUSY_MASK)!=0)
#define MVA_SET_BUSY(index) (mvaGraph[index] |= MVA_BUSY_MASK)
/* NOTE(review): despite the name this only COMPUTES the cleared value ('&', not
 * '&='); it does not write mvaGraph[index]. Confirm callers assign the result. */
#define MVA_SET_FREE(index) (mvaGraph[index] & (~MVA_BUSY_MASK))
#define MVA_GET_NR(index) (mvaGraph[index] & MVA_BLOCK_NR_MASK)
#define MVAGRAPH_INDEX(mva) (mva>>MVA_BLOCK_SIZE_ORDER)

/* MVA allocator state: one slot per MVA block; guarded by gMvaGraph_lock. */
static short mvaGraph[MVA_MAX_BLOCK_NR+1];
static mva_info_t* mvaInfoGraph[MVA_MAX_BLOCK_NR+1];

//#define M4U_MVA_ALLOC_DEBUG
#ifdef M4U_MVA_ALLOC_DEBUG
#define M4U_MVA_DBUG(string, args...) printk("[M4U_K][MVA]"string,##args)
#define M4U_mvaGraph_dump_DBG() m4u_mvaGraph_dump()
#else
#define M4U_MVA_DBUG(string, args...)
#define M4U_mvaGraph_dump_DBG()
#endif

static DEFINE_SPINLOCK(gMvaGraph_lock);

/* Forward declarations for helpers defined later in this file. */
static void m4u_mvaGraph_init(void);
static void m4u_mvaGraph_dump_raw(void);
static void m4u_mvaGraph_dump(void);
static int m4u_dealloc_mva_dynamic(M4U_MODULE_ID_ENUM eModuleID, const unsigned int BufAddr, const unsigned int BufSize, unsigned int mvaRegionAddr, struct sg_table* sg_table);
static unsigned int m4u_do_mva_alloc(M4U_MODULE_ID_ENUM eModuleID, const unsigned int BufAddr, const unsigned int BufSize, mva_info_t *pMvaInfo);
static int m4u_do_mva_free(M4U_MODULE_ID_ENUM eModuleID, const unsigned int BufAddr, const unsigned int BufSize, unsigned int mvaRegionStart) ;
static int m4u_dump_pagetable(M4U_MODULE_ID_ENUM eModuleID);
static int m4u_confirm_range_invalidated(int m4u_index, unsigned int MVAStart, unsigned int MVAEnd);
static bool m4u_struct_init(void);
static int m4u_hw_init(void);
static int m4u_get_pages(M4U_MODULE_ID_ENUM eModuleID, unsigned int BufAddr, unsigned int BufSize, unsigned int* pPageTableAddr);
static int m4u_get_pages_sg(M4U_MODULE_ID_ENUM eModuleID, unsigned int BufAddr, unsigned int BufSize, struct sg_table* sg_table, unsigned int* pPhys);
static int m4u_release_pages(M4U_MODULE_ID_ENUM eModuleID, unsigned int BufAddr, unsigned int BufSize, unsigned int MVA, struct sg_table* sg_table);
//static M4U_DMA_DIR_ENUM m4u_get_dir_by_module(M4U_MODULE_ID_ENUM eModuleID);
static void m4u_clear_intr(unsigned int m4u_base);
static int m4u_port_2_m4u_id(M4U_PORT_ID_ENUM portID);
static void m4u_memory_usage(bool bPrintAll);
static void m4u_print_active_port(unsigned int m4u_index);
static M4U_MODULE_ID_ENUM m4u_port_2_module(M4U_PORT_ID_ENUM portID);
static char* m4u_get_port_name(M4U_PORT_ID_ENUM portID);
static char* m4u_get_module_name(M4U_MODULE_ID_ENUM moduleID);
unsigned int m4u_get_pa_by_mva(unsigned int mva);
static int m4u_dump_user_addr_register(M4U_PORT_ID_ENUM port);
static int m4u_free_garbage_list(mva_info_t *pList);
static int m4u_manual_insert_entry(M4U_PORT_ID_ENUM eModuleID, unsigned int EntryMVA, int secure_pagetable, int Lock);
static int m4u_add_to_garbage_list(struct file * a_pstFile,mva_info_t *pList);
static mva_info_t* m4u_delete_from_garbage_list(M4U_MOUDLE_STRUCT* p_m4u_module, struct file * a_pstFile);
M4U_PORT_ID_ENUM m4u_get_error_port(unsigned int m4u_index, unsigned int mva);
static int m4u_dump_mva_info(void);
static void m4u_dump_pagetable_range(unsigned int vaStart, unsigned int nr);
static void m4u_print_mva_list(struct file *filep, const char *pMsg);
extern void munlock_vma_page(struct page *page);
static int m4u_dump_main_tlb_tags(int m4u_id) ;
static int m4u_dump_main_tlb_des(int m4u_id);
static int m4u_dump_pfh_tlb_tags(int m4u_id);
static int m4u_dump_pfh_tlb_des(int m4u_id);
static int m4u_enable_error_hang(int m4u_id);
static int m4u_disable_error_hang(int m4u_id);
static int m4u_search_main_invalid(int m4u_id);
static unsigned int m4u_get_main_descriptor(unsigned int m4u_base, unsigned int idx);
static unsigned int m4u_get_pfh_descriptor(unsigned int m4u_base, int tlbIndex, int tlbSelect);
static void m4u_profile_init(void);
extern void smp_inner_dcache_flush_all(void);
static int m4u_cache_sync_init(void);

//-------------------------------------Global variables------------------------------------------------//
#define MAX_BUF_SIZE_TO_GET_USER_PAGE (200*1024*1024) //200MB at most for single time alloc
extern unsigned char *pMlock_cnt;
extern unsigned int mlock_cnt_size;

// record memory usage
int* pmodule_max_size=NULL;
int* pmodule_current_size=NULL;
int* pmodule_locked_pages=NULL;

/* Register bases and per-instance main-TLB tag counts for the two M4Us. */
unsigned int gM4UBaseAddr[TOTAL_M4U_NUM] = {M4U_BASE0, M4U_BASE1};
static unsigned int g4M4UTagCount[TOTAL_M4U_NUM] = {M4U_MAIN_TLB_NR, M4U_MAIN_TLB_NR};
//static unsigned int g4M4UWrapCount[TOTAL_M4U_NUM] = {M4U_WRAP_NR, M4U_WRAP_NR};
//static unsigned int g4M4UWrapOffset[TOTAL_M4U_NUM]= {0, M4U_WRAP_NR};
static volatile unsigned int FreeSEQRegs[TOTAL_M4U_NUM] = {M4U_SEQ_NR, M4U_SEQ_NR};
static volatile unsigned int FreeWrapRegs[TOTAL_M4U_NUM]= {M4U_WRAP_NR, M4U_WRAP_NR};
/* larb -> m4u index, plus first-port-of-each-larb lookup tables. */
static unsigned int m4u_index_of_larb[SMI_LARB_NR] = {0,0,0};
static unsigned int smi_port0_in_larbx[SMI_LARB_NR+1] = {0, 9, 16, 33};
static unsigned int m4u_port0_in_larbx[SMI_LARB_NR+1] = {0, 9, 16, 33};
static int gM4U_L2_enable = 1;
static unsigned int pt_pa_nonsec; //Page Table Physical Address, 64K align
static unsigned int *pPT_nonsec;
static unsigned int pt_pa_sec;
static unsigned int *pPT_sec;
#define TF_PROTECT_BUFFER_SIZE 128
static unsigned int ProtectPA = 0;
static unsigned int *pProtectVA_nonCache = NULL;
//unsigned int gM4U_align_page_va = 0;
static unsigned int gM4U_align_page_pa = 0;
//#define BACKUP_REG_SIZE (M4U_REG_SIZE*TOTAL_M4U_NUM)
#define BACKUP_REG_SIZE 640
static unsigned int* pM4URegBackUp = 0;
static M4U_RANGE_DES_T *pRangeDes = NULL;
static M4U_WRAP_DES_T *pWrapDes = 0;
#define RANGE_DES_ADDR 0x11
/* Debug knobs consulted by the ISR and fault-recovery paths. */
static int g_debug_make_translation_fault=0;
static int g_debug_print_detail_in_isr=1;
static int g_debug_enable_error_hang=0;
static int g_debug_recover_pagetable_TF=0;
static int g_debug_dump_rs_in_isr=0;
static spinlock_t gM4u_reg_lock;
static DEFINE_MUTEX(gM4uMutex);
static DEFINE_MUTEX(gM4uMutexPower);
#define MTK_M4U_DEV_MAJOR_NUMBER 188
/* Char-device bookkeeping (legacy path; the driver actually registers via /proc). */
static struct cdev * g_pMTKM4U_CharDrv = NULL;
static dev_t g_MTKM4Udevno = MKDEV(MTK_M4U_DEV_MAJOR_NUMBER,0);
#define M4U_DEVNAME "M4U_device"

extern void init_mlock_cnt(void);
extern unsigned int m4u_user_v2p(unsigned int va);
extern int is_pmem_range(unsigned long* base, unsigned long size);
extern int m4u_get_user_pages(int eModuleID, struct task_struct *tsk, struct mm_struct *mm, unsigned long start, int nr_pages, int write, int force, struct page **pages, struct vm_area_struct **vmas);

static unsigned int gModuleMaxMVASize[M4U_CLIENT_MODULE_NUM];

/* Verification level: controls how aggressively TLB state is cross-checked. */
typedef enum
{
    M4U_TEST_LEVEL_USER = 0,    // best performance, least verification
    M4U_TEST_LEVEL_ENG = 1,     // SQC builds: more M4UMSG and M4UERR
    M4U_TEST_LEVEL_STRESS= 2    // strictest verification; may use M4UERR instead of M4UMSG; internal test only
} M4U_TEST_LEVEL_ENUM;
M4U_TEST_LEVEL_ENUM gTestLevel = M4U_TEST_LEVEL_ENG;

/* Power on/off "try" hooks are compiled out on this platform. */
#define M4U_POW_ON_TRY(eModuleID)
#define M4U_POW_OFF_TRY(eModuleID)

//--------------------------------------Functions-----------------------------------------------------//

/* Global M4U port id -> index of that port inside its own larb. */
static inline int m4u_port_2_larb_port(M4U_PORT_ID_ENUM port)
{
    int i;
    for(i=SMI_LARB_NR-1; i>=0; i--)
    {
        if(port >= m4u_port0_in_larbx[i])
            return (port-m4u_port0_in_larbx[i]);
    }
    return 0;
}

/* Global M4U port id -> larb index it belongs to. */
static inline int m4u_port_2_larb_id(M4U_PORT_ID_ENUM port)
{
    int i;
    for(i=SMI_LARB_NR-1; i>=0; i--)
    {
        if(port >= m4u_port0_in_larbx[i])
            return i;
    }
    return 0;
}

static inline int larb_2_m4u_id(int larb)
{
    return m4u_index_of_larb[larb];
}

inline int m4u_port_2_m4u_id(M4U_PORT_ID_ENUM port)
{
    return larb_2_m4u_id(m4u_port_2_larb_id(port));
}

/* Global M4U port id -> global SMI port id (different base tables). */
static inline int m4u_port_2_smi_port(M4U_PORT_ID_ENUM port)
{
    int larb = m4u_port_2_larb_id(port);
    int local_port = m4u_port_2_larb_port(port);
    return smi_port0_in_larbx[larb]+local_port;
}

/* (larb, local port) -> global M4U port id. */
static inline M4U_PORT_ID_ENUM larb_port_2_m4u_port(unsigned int larb, unsigned int local_port)
{
    return m4u_port0_in_larbx[larb]+local_port;
}

/* Module id -> larb index; LCDC_UI is hard-wired to larb 0. */
static int m4u_module_2_larb(M4U_MODULE_ID_ENUM eModuleID)
{
    unsigned int larb = 0;
    if(eModuleID == M4U_CLNTMOD_LCDC_UI)
        return 0;
    if(eModuleID >= M4U_PORT_UNKNOWN)
        M4UMSG("m4u_module_2_larb errro: %d\n", eModuleID);
    else
        larb = m4u_port_2_larb_id((M4U_PORT_ID_ENUM)eModuleID);
    return larb;
}

/* Module ids and port ids share the same numbering on this SoC. */
M4U_MODULE_ID_ENUM m4u_port_2_module(M4U_PORT_ID_ENUM portID)
{
    return portID;
}

int m4u_module_2_m4u_id(M4U_MODULE_ID_ENUM emoduleID)
{
    return larb_2_m4u_id(m4u_module_2_larb(emoduleID));
}

/*
 * Invalidate TLB entries on one (or both) M4U instances.
 * @param m4u_id    M4U_ID_0 / M4U_ID_1, anything else hits both.
 * @param L2_en     also invalidate the shared L2 TLB and poll for completion.
 * @param isInvAll  non-zero: invalidate everything; zero: only [mva_start, mva_end].
 * Busy-waits on the L2 status register when L2_en is set.
 */
static int m4u_invalid_tlb(M4U_ID_ENUM m4u_id,int L2_en, int isInvAll, unsigned int mva_start, unsigned int mva_end)
{
    unsigned int reg = 0;
    if(L2_en)
        reg = F_MMUg_CTRL_INV_EN2;
    if(m4u_id == M4U_ID_0)
        reg |= F_MMUg_CTRL_INV_EN0;
    else if(m4u_id == M4U_ID_1)
        reg |= F_MMUg_CTRL_INV_EN1;
    else
    {
        reg |= F_MMUg_CTRL_INV_EN0;
        reg |= F_MMUg_CTRL_INV_EN1;
    }
    COM_WriteReg32(REG_MMUg_CTRL, reg);
    if(isInvAll)
    {
        COM_WriteReg32(REG_MMUg_INVLD, F_MMUg_INV_ALL);
    }
    else
    {
        /* range registers are page (4K) aligned */
        COM_WriteReg32(REG_MMUg_INVLD_SA ,mva_start & (~0xfff));
        COM_WriteReg32(REG_MMUg_INVLD_EA, mva_end&(~0xfff));
        COM_WriteReg32(REG_MMUg_INVLD, F_MMUg_INV_RANGE);
    }
    if(L2_en)
    {
        if(isInvAll)
        {
            unsigned int event = 0;
            /* poll until the L2 GDC reports the event, then acknowledge it */
            while(!(event=m4uHw_get_field_by_mask(0, REG_L2_GDC_STATE, F_L2_GDC_ST_EVENT_MSK)));
            m4uHw_set_field_by_mask(0, REG_L2_GDC_STATE, F_L2_GDC_ST_EVENT_MSK, 0);
        }
        else
        {
            while(!(m4uHw_get_field_by_mask(0, REG_L2_GPE_STATUS, F_L2_GPE_ST_RANGE_INV_DONE)));
            m4uHw_set_field_by_mask(0, REG_L2_GPE_STATUS, F_L2_GPE_ST_RANGE_INV_DONE, 0);
        }
    }
    return 0;
}

static void m4u_invalid_tlb_all(M4U_ID_ENUM m4u_id, int L2_en)
{
    m4u_invalid_tlb(m4u_id, L2_en, 1, 0, 0);
}

static void m4u_invalid_tlb_by_range(M4U_ID_ENUM m4u_id, int L2_en, unsigned int mva_start, unsigned int mva_end)
{
    m4u_invalid_tlb(m4u_id, L2_en, 0, mva_start, mva_end);
}

/* Secure-world variant of the range invalidation above. */
static void m4u_invalid_tlb_sec_by_range(M4U_ID_ENUM m4u_id, int L2_en, unsigned int mva_start, unsigned int mva_end)
{
    unsigned int reg = 0;
    if(L2_en)
        reg = F_MMUg_CTRL_SEC_INV_EN2;
    if(m4u_id == M4U_ID_0)
        reg |= F_MMUg_CTRL_SEC_INV_EN0;
    else if(m4u_id == M4U_ID_1)
        reg |=
F_MMUg_CTRL_SEC_INV_EN1;
    else
    {
        reg |= F_MMUg_CTRL_SEC_INV_EN0;
        reg |= F_MMUg_CTRL_SEC_INV_EN1;
    }
    m4uHw_set_field_by_mask(0, REG_MMUg_CTRL_SEC, ~F_MMUg_CTRL_SEC_DBG, reg);
    COM_WriteReg32(REG_MMUg_INVLD_SA_SEC,mva_start & (~0xfff));
    COM_WriteReg32(REG_MMUg_INVLD_EA_SEC, mva_end&(~0xfff));
    COM_WriteReg32(REG_MMUg_INVLD_SEC, F_MMUg_INV_SEC_RANGE);
    if(L2_en)
    {
        /* busy-wait until the secure range invalidation completes, then ack */
        while(!(m4uHw_get_field_by_mask(0, REG_L2_GPE_STATUS_SEC, F_L2_GPE_ST_RANGE_INV_DONE_SEC)));
        m4uHw_set_field_by_mask(0, REG_L2_GPE_STATUS_SEC, F_L2_GPE_ST_RANGE_INV_DONE_SEC, 0);
    }
}

/* Pre-load (and optionally lock) L2 TLB entries covering [start, end); polls for completion. */
static void m4u_L2_prefetch(unsigned int start, unsigned int end, int lock)
{
    unsigned int reg;
    COM_WriteReg32(REG_MMUg_INVLD_SA ,start & (~0xfff));
    COM_WriteReg32(REG_MMUg_INVLD_EA, end&(~0xfff));
    mb();
    reg = F_MMUg_CTRL_INV_EN2 | F_MMUg_CTRL_PRE_EN | (F_MMUg_CTRL_PRE_LOCK(!!lock));
    COM_WriteReg32(REG_MMUg_CTRL, reg);
    while(!(m4uHw_get_field_by_mask(0, REG_L2_GPE_STATUS, F_L2_GPE_ST_PREFETCH_DONE)));
    m4uHw_set_field_by_mask(0, REG_L2_GPE_STATUS, F_L2_GPE_ST_PREFETCH_DONE, 0);
}

/* Log the VMA of 'addr' in the current process; -1 if no VMA covers it. */
static int m4u_dump_maps(unsigned int addr)
{
    struct vm_area_struct *vma;
    M4UMSG("addr=0x%x, name=%s,pid=0x%x,", addr, current->comm, current->pid);
    vma = find_vma(current->mm, addr);
    if(vma == NULL)
    {
        M4UMSG("dump_maps fail: find_vma return NULL\n");
        return -1;
    }
    M4UMSG("find vma: 0x%08x-0x%08x, flags=0x%x\n", (unsigned int)(vma->vm_start), (unsigned int)(vma->vm_end), vma->vm_flags);
    return 0;
}

//file operations
/* open(): allocate the per-fd garbage node that tracks this client's MVA regions. */
static int MTK_M4U_open(struct inode * a_pstInode, struct file * a_pstFile)
{
    garbage_node_t * pNode;
    M4UDBG("enter MTK_M4U_open() process:%s\n",current->comm);
    //Allocate and initialize private data
    a_pstFile->private_data = kmalloc(sizeof(garbage_node_t) , GFP_ATOMIC);
    if(NULL == a_pstFile->private_data)
    {
        M4UMSG("Not enough entry for M4U open operation\n");
        return -ENOMEM;
    }
    pNode = (garbage_node_t *)a_pstFile->private_data;
    mutex_init(&(pNode->dataMutex));
    mutex_lock(&(pNode->dataMutex));
    pNode->open_pid = current->pid;
    pNode->open_tgid = current->tgid;
    pNode->OwnResource = 0;
    pNode->isM4uDrvConstruct = 0;
    pNode->isM4uDrvDeconstruct = 0;
    INIT_LIST_HEAD(&(pNode->mvaList));
    mutex_unlock(&(pNode->dataMutex));
    return 0;
}

extern int m4u_reclaim_mva_callback_ovl(int moduleID, unsigned int va, unsigned int size, unsigned int mva);

/* release(): reclaim every MVA region this client leaked before closing the fd. */
static int MTK_M4U_release(struct inode * a_pstInode, struct file * a_pstFile)
{
    struct list_head *pListHead, *ptmp;
    garbage_node_t *pNode = a_pstFile->private_data;
    mva_info_t *pList;
    int error = 0;
    M4UDBG("enter MTK_M4U_release() process:%s\n",current->comm);
    mutex_lock(&(pNode->dataMutex));
    /* A clean client constructs+deconstructs the userspace wrapper and leaves no regions. */
    if(pNode->isM4uDrvConstruct==0 || pNode->isM4uDrvDeconstruct==0 || !list_empty(&pNode->mvaList))
    {
        M4UMSG("warning on close: construct=%d, deconstruct=%d, open_pid=%d, cur_pid=%d.\n",
            pNode->isM4uDrvConstruct, pNode->isM4uDrvDeconstruct, pNode->open_pid, current->pid);
        M4UMSG("open->tgid=%d, cur->tgid=%d, cur->mm=0x%x, inode=%p, file=%p.\n",
            pNode->open_tgid, current->tgid, current->mm, a_pstInode, a_pstFile);
        error = 1;
    }
    pListHead = pNode->mvaList.next;
    while(pListHead!= &(pNode->mvaList))
    {
        ptmp = pListHead;
        pListHead = pListHead->next;
        pList = container_of(ptmp, mva_info_t, link);
        M4UMSG("warnning: clean garbage at m4u close: module=%s,va=0x%x,mva=0x%x,size=%d, inode=%p, file=%p\n",
            m4u_get_module_name(pList->eModuleId),pList->bufAddr,pList->mvaStart,pList->size, a_pstInode, a_pstFile);
        list_del(ptmp);
        //kfree(pList); notes: m4u_dealloc_mva will help to free listp
        //if registered but never has chance to query this buffer (we will allocate mva in query_mva)
        //then the mva will be 0, and MVA_REGION_REGISTER flag will be set.
        //we don't call deallocate for this mva, because it's 0 ...
if(pList->mvaStart != 0)
        {
            int ret;
            /* give the OVL owner a chance to drop references before tearing down */
            m4u_reclaim_mva_callback_ovl(pList->eModuleId, pList->bufAddr, pList->size, pList->mvaStart);
            ret = m4u_dealloc_mva(pList->eModuleId, pList->bufAddr, pList->size, pList->mvaStart);
            if(ret)
                m4u_free_garbage_list(pList);
        }
        else
        {
            if(!(pList->flags&MVA_REGION_REGISTER))
                M4UERR("warning: in garbage reclaim: mva==0, but MVA_REGION_REGISTER is not set!! flag=0x%x\n", pList->flags);
        }
        error = 1;
    }
    mutex_unlock(&(pNode->dataMutex));
    if(error)
        M4UMSG("warning: clean mva done inode=%p, file=%p\n", a_pstInode, a_pstFile);
    if(NULL != a_pstFile->private_data)
    {
        kfree(a_pstFile->private_data);
        a_pstFile->private_data = NULL;
    }
    return 0;
}

/* flush(): nothing to do; kept for tracing. */
static int MTK_M4U_flush(struct file * a_pstFile , fl_owner_t a_id)
{
    M4UDBG("enter MTK_M4U_flush() process:%s\n", current->comm);
    return 0;
}

/*
 * Main ioctl dispatcher. Every case copies its argument struct from user
 * space, validates the copy, and forwards to the matching m4u_* helper.
 * Returns 0/-errno, or the helper's return value.
 */
static long MTK_M4U_ioctl(struct file * a_pstFile, unsigned int a_Command, unsigned long a_Param)
{
    int ret = 0;
    M4U_MOUDLE_STRUCT m4u_module;
    M4U_PORT_STRUCT m4u_port;
    M4U_PORT_STRUCT_ROTATOR m4u_port_rotator;
    M4U_PORT_ID_ENUM PortID;
    M4U_MODULE_ID_ENUM ModuleID;
    M4U_WRAP_DES_T m4u_wrap_range;
    M4U_CACHE_STRUCT m4u_cache_data;
    garbage_node_t *pNode = a_pstFile->private_data;
    switch(a_Command)
    {
    case MTK_M4U_T_POWER_ON :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&ModuleID, (void*)a_Param , sizeof(unsigned int));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_POWER_ON, copy_from_user failed, %d\n", ret);
            return -EFAULT;
        }
        ret = m4u_power_on(ModuleID);
        break;
    case MTK_M4U_T_POWER_OFF :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&ModuleID, (void*)a_Param , sizeof(unsigned int));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_POWER_OFF, copy_from_user failed, %d\n", ret);
            return -EFAULT;
        }
        ret = m4u_power_off(ModuleID);
        break;
    case MTK_M4U_T_ALLOC_MVA :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_module, (void*)a_Param , sizeof(M4U_MOUDLE_STRUCT));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_ALLOC_MVA, copy_from_user failed: %d\n", ret);
            return -EFAULT;
        }
        if(m4u_module.MVAStart == -1) //work around for wrap layer
        {
            /* pmem path: the "MVA" returned is simply the buffer's physical address */
            m4u_module.MVAStart = m4u_user_v2p(m4u_module.BufAddr);
            M4UMSG("alloc_mva_pmem: module=%d,va=0x%x, pa=0x%x\n", m4u_module.eModuleID, m4u_module.BufAddr, m4u_module.MVAStart);
            ret = 0;
        }
        else
        {
            mva_info_t *pList = NULL;
            pList = m4u_alloc_garbage_list(0, m4u_module.BufSize, m4u_module.eModuleID,
                m4u_module.BufAddr, MVA_REGION_FLAG_NONE, m4u_module.security, m4u_module.cache_coherent);
            ret = __m4u_alloc_mva(pList, NULL);
            if(ret)
            {
                m4u_module.MVAStart = 0; //notes: mva_info node will be freed in __m4u_alloc_mva if failed.
                M4UMSG(" MTK_M4U_T_ALLOC_MVA, m4u_alloc_mva failed: %d\n", ret);
                return -EFAULT;
            }
            else
            {
                m4u_module.MVAStart = pList->mvaStart;
                m4u_add_to_garbage_list(a_pstFile, pList);
            }
        }
        /* write only the MVAStart field back to the user struct */
        ret = copy_to_user(&(((M4U_MOUDLE_STRUCT*)a_Param)->MVAStart), &(m4u_module.MVAStart) , sizeof(unsigned int));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_ALLOC_MVA, copy_from_user failed: %d\n", ret);
            return -EFAULT;
        }
        break;
    case MTK_M4U_T_QUERY_MVA :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_module, (void*)a_Param , sizeof(M4U_MOUDLE_STRUCT));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_QUERY_MVA, copy_from_user failed: %d\n", ret);
            return -EFAULT;
        }
        M4UDBG("-MTK_M4U_T_QUERY_MVA, module_id=%d, BufAddr=0x%x, BufSize=%d \r\n",
            m4u_module.eModuleID, m4u_module.BufAddr, m4u_module.BufSize );
        m4u_query_mva(m4u_module.eModuleID, m4u_module.BufAddr, m4u_module.BufSize, &(m4u_module.MVAStart), a_pstFile);
        ret = copy_to_user(&(((M4U_MOUDLE_STRUCT*)a_Param)->MVAStart), &(m4u_module.MVAStart) , sizeof(unsigned int));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_QUERY_MVA, copy_from_user failed: %d\n", ret);
            return -EFAULT;
        }
        M4UDBG("MTK_M4U_T_QUERY_MVA, m4u_module.MVAStart=0x%x \n", m4u_module.MVAStart);
        break;
    case MTK_M4U_T_DEALLOC_MVA :
    {
        mva_info_t *pMvaInfo;
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_module, (void*)a_Param , sizeof(M4U_MOUDLE_STRUCT));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_DEALLOC_MVA, copy_from_user failed: %d\n", ret);
            return -EFAULT;
        }
        M4UDBG("MTK_M4U_T_DEALLOC_MVA, eModuleID:%d, VABuf:0x%x, Length:%d, MVAStart=0x%x \r\n",
            m4u_module.eModuleID, m4u_module.BufAddr, m4u_module.BufSize, m4u_module.MVAStart);
        pMvaInfo = m4u_delete_from_garbage_list(&m4u_module, a_pstFile);
        if(pMvaInfo==NULL)
        {
            M4UMSG("error to dealloc mva: id=%s,va=0x%x,size=%d,mva=0x%x\n",
                m4u_get_module_name(m4u_module.eModuleID), m4u_module.BufAddr, m4u_module.BufSize, m4u_module.MVAStart);
            m4u_print_mva_list(a_pstFile, "in deallocate");
        }
        else
        {
            //if user register a buffer without query it,
            //then we never allocated a real mva for it,
            //when deallocate, m4u_module.MVAStart==0, we think this is right.
            if(m4u_module.MVAStart!=0)
            {
                m4u_dealloc_mva(m4u_module.eModuleID, m4u_module.BufAddr, m4u_module.BufSize, m4u_module.MVAStart);
            }
            else
            {
                M4UMSG("warning: deallocat a registered buffer, before any query!\n");
                M4UMSG("error to dealloc mva: id=%s,va=0x%x,size=%d,mva=0x%x\n",
                    m4u_get_module_name(m4u_module.eModuleID), m4u_module.BufAddr, m4u_module.BufSize, m4u_module.MVAStart);
            }
            //m4u_free_garbage_list(pMvaInfo);
        }
    }
        break;
    case MTK_M4U_T_MANUAL_INSERT_ENTRY :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_module, (void*)a_Param , sizeof(M4U_MOUDLE_STRUCT));
        if(ret)
        {
            M4UERR(" MTK_M4U_Manual_Insert_Entry, copy_from_user failed: %d\n", ret);
            return -EFAULT;
        }
        M4UDBG(" ManualInsertTLBEntry, eModuleID:%d, Entry_MVA:0x%x, locked:%d\r\n",
            m4u_module.eModuleID, m4u_module.EntryMVA, m4u_module.Lock);
        ret = m4u_manual_insert_entry(m4u_module.eModuleID, m4u_module.EntryMVA, !!(m4u_module.security), m4u_module.Lock);
        break;
    case MTK_M4U_T_INSERT_TLB_RANGE :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_module, (void*)a_Param , sizeof(M4U_MOUDLE_STRUCT));
        if(ret)
        {
            M4UERR("m4u_insert_seq_range , copy_from_user failed: %d\n", ret);
            return -EFAULT;
        }
        M4UDBG("m4u_insert_seq_range , eModuleID:%d, MVAStart:0x%x, MVAEnd:0x%x, ePriority=%d \r\n",
            m4u_module.eModuleID, m4u_module.MVAStart, m4u_module.MVAEnd, m4u_module.ePriority);
        ret = m4u_insert_seq_range(m4u_module.eModuleID, m4u_module.MVAStart, m4u_module.MVAEnd,
            m4u_module.ePriority, m4u_module.entryCount);
        break;
    case MTK_M4U_T_INVALID_TLB_RANGE :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_module, (void*)a_Param , sizeof(M4U_MOUDLE_STRUCT));
        if(ret)
        {
            M4UERR(" MTK_M4U_Invalid_TLB_Range, copy_from_user failed: %d\n", ret);
            return -EFAULT;
        }
        M4UDBG("MTK_M4U_Invalid_TLB_Range(), eModuleID:%d, MVAStart=0x%x, MVAEnd=0x%x \n",
            m4u_module.eModuleID, m4u_module.MVAStart, m4u_module.MVAEnd);
        ret = m4u_invalid_seq_range(m4u_module.eModuleID, m4u_module.MVAStart, m4u_module.MVAEnd);
        break;
    case MTK_M4U_T_INVALID_TLB_ALL :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&ModuleID, (void*)a_Param , sizeof(unsigned int));
        if(ret)
        {
            M4UERR(" MTK_M4U_Invalid_TLB_Range, copy_from_user failed, %d\n", ret);
            return -EFAULT;
        }
        //ret = m4u_invalid_tlb_all(ModuleID);
        break;
    case MTK_M4U_T_DUMP_REG :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&ModuleID, (void*)a_Param , sizeof(unsigned int));
        if(ret)
        {
            M4UERR(" MTK_M4U_Invalid_TLB_Range, copy_from_user failed, %d\n", ret);
            return -EFAULT;
        }
        m4u_dump_main_tlb_tags(m4u_module_2_m4u_id(ModuleID));
        ret = m4u_dump_reg(ModuleID);
        break;
    case MTK_M4U_T_DUMP_INFO :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&ModuleID, (void*)a_Param , sizeof(unsigned int));
        if(ret)
        {
            M4UERR(" MTK_M4U_Invalid_TLB_Range, copy_from_user failed, %d\n", ret);
            return -EFAULT;
        }
        ret = m4u_dump_info(m4u_module_2_m4u_id(ModuleID));
        m4u_dump_pagetable(ModuleID);
        break;
    case MTK_M4U_T_CACHE_SYNC :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_cache_data, (void*)a_Param , sizeof(M4U_CACHE_STRUCT));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_CACHE_INVALID_AFTER_HW_WRITE_MEM, copy_from_user failed: %d\n", ret);
            return -EFAULT;
        }
        M4UDBG("MTK_M4U_T_CACHE_INVALID_AFTER_HW_WRITE_MEM(), moduleID=%d, eCacheSync=%d, buf_addr=0x%x, buf_length=0x%x \n",
            m4u_cache_data.eModuleID, m4u_cache_data.eCacheSync, m4u_cache_data.BufAddr, m4u_cache_data.BufSize);
        /* map the requested sync direction onto a DMA maintenance direction */
        switch(m4u_cache_data.eCacheSync)
        {
        case M4U_CACHE_FLUSH_BEFORE_HW_WRITE_MEM:
        case M4U_CACHE_FLUSH_BEFORE_HW_READ_MEM:
            ret = m4u_dma_cache_maint(m4u_cache_data.eModuleID, (unsigned int*)(m4u_cache_data.BufAddr), m4u_cache_data.BufSize, M4U_DMA_READ_WRITE);
            break;
        case M4U_CACHE_CLEAN_BEFORE_HW_READ_MEM:
            ret = m4u_dma_cache_maint(m4u_cache_data.eModuleID, (unsigned int*)(m4u_cache_data.BufAddr), m4u_cache_data.BufSize, M4U_DMA_READ);
            break;
        case M4U_CACHE_INVALID_AFTER_HW_WRITE_MEM:
            ret = m4u_dma_cache_maint(m4u_cache_data.eModuleID, (unsigned int*)(m4u_cache_data.BufAddr), m4u_cache_data.BufSize, M4U_DMA_WRITE);
            break;
        default:
            M4UMSG("error: MTK_M4U_T_CACHE_SYNC, invalid eCacheSync=%d, module=%d \n", m4u_cache_data.eCacheSync, m4u_cache_data.eModuleID);
        }
        break;
    case MTK_M4U_T_CONFIG_PORT :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_port, (void*)a_Param , sizeof(M4U_PORT_STRUCT));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_CONFIG_PORT, copy_from_user failed: %d \n", ret);
            return -EFAULT;
        }
        M4UDBG("ePortID=%d, Virtuality=%d, Security=%d, Distance=%d, Direction=%d \n",
            m4u_port.ePortID, m4u_port.Virtuality, m4u_port.Security, m4u_port.Distance, m4u_port.Direction);
        ret = m4u_config_port(&m4u_port);
        break;
    case MTK_M4U_T_CONFIG_PORT_ROTATOR:
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_port_rotator, (void*)a_Param , sizeof(M4U_PORT_STRUCT_ROTATOR));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_CONFIG_PORT_ROTATOR, copy_from_user failed: %d \n", ret);
            return -EFAULT;
        }
        ret = m4u_config_port_rotator(&m4u_port_rotator);
        break;
    case MTK_M4U_T_CONFIG_ASSERT :
        // todo
        break;
    case MTK_M4U_T_INSERT_WRAP_RANGE :
        M4U_ASSERT(a_Param);
        ret = copy_from_user(&m4u_wrap_range, (void*)a_Param , sizeof(M4U_WRAP_DES_T));
        if(ret)
        {
            M4UERR(" MTK_M4U_T_INSERT_WRAP_RANGE, copy_from_user failed: %d \n", ret);
            return -EFAULT;
        }
        M4UDBG("PortID=%d, eModuleID=%d, MVAStart=0x%x, MVAEnd=0x%x \n",
            m4u_wrap_range.ePortID, m4u_wrap_range.eModuleID, m4u_wrap_range.MVAStart, m4u_wrap_range.MVAEnd );
        ret = m4u_insert_wrapped_range(m4u_wrap_range.eModuleID, m4u_wrap_range.ePortID, m4u_wrap_range.MVAStart,
m4u_wrap_range.MVAEnd); break; case MTK_M4U_T_MONITOR_START : M4U_ASSERT(a_Param); ret = copy_from_user(&PortID, (void*)a_Param , sizeof(unsigned int)); if(ret) { M4UERR(" MTK_M4U_T_MONITOR_START, copy_from_user failed, %d\n", ret); return -EFAULT; } ret = m4u_monitor_start(PortID); break; case MTK_M4U_T_MONITOR_STOP : M4U_ASSERT(a_Param); ret = copy_from_user(&PortID, (void*)a_Param , sizeof(unsigned int)); if(ret) { M4UERR(" MTK_M4U_T_MONITOR_STOP, copy_from_user failed, %d\n", ret); return -EFAULT; } ret = m4u_monitor_stop(PortID); break; case MTK_M4U_T_RESET_MVA_RELEASE_TLB : M4U_ASSERT(a_Param); ret = copy_from_user(&ModuleID, (void*)a_Param , sizeof(ModuleID)); if(ret) { M4UERR(" MTK_M4U_T_RESET_MVA_RELEASE_TLB, copy_from_user failed: %d\n", ret); return -EFAULT; } break; case MTK_M4U_T_M4UDrv_CONSTRUCT: mutex_lock(&(pNode->dataMutex)); pNode->isM4uDrvConstruct = 1; mutex_unlock(&(pNode->dataMutex)); break; case MTK_M4U_T_M4UDrv_DECONSTRUCT: mutex_lock(&(pNode->dataMutex)); pNode->isM4uDrvDeconstruct = 1; mutex_unlock(&(pNode->dataMutex)); break; case MTK_M4U_T_DUMP_PAGETABLE: do{ unsigned int mva, va, page_num, size, i; M4U_ASSERT(a_Param); ret = copy_from_user(&m4u_module, (void*)a_Param , sizeof(M4U_MOUDLE_STRUCT)); if(ret) { M4UERR(" MTK_M4U_T_ALLOC_MVA, copy_from_user failed: %d\n", ret); return -EFAULT; } mva = m4u_module.MVAStart; va = m4u_module.BufAddr; size = m4u_module.BufSize; page_num = (size + (va&0xfff))/DEFAULT_PAGE_SIZE; M4UMSG("M4U dump pagetable in ioctl: mva=0x%x, size=0x%x===>\n", mva,size); m4u_dump_pagetable_range(mva, page_num); printk("\n"); M4UMSG("M4U dump PA by VA in ioctl: va=0x%x, size=0x%x===>\n", va,size); printk("0x%08x: ", va); for(i=0; i<page_num; i++) { printk("0x%08x, ", m4u_user_v2p(va+i*M4U_PAGE_SIZE)); if((i+1)%8==0) { printk("\n 0x%08x: ", (va+((i+1)<<12))); } } printk("\n"); M4UMSG("========= compare these automaticly =======>\n"); for(i=0; i<page_num; i++) { unsigned int pa, entry; pa = 
m4u_user_v2p(va+i*M4U_PAGE_SIZE); entry = *(unsigned int*)mva_pteAddr_nonsec((mva+i*M4U_PAGE_SIZE)); if((pa&(~0xfff)) != (pa&(~0xfff))) { M4UMSG("warning warning!! va=0x%x,mva=0x%x, pa=0x%x,entry=0x%x\n", va+i*M4U_PAGE_SIZE, mva+i*M4U_PAGE_SIZE, pa, entry); } } }while(0); break; case MTK_M4U_T_REGISTER_BUFFER: { mva_info_t *pMvaInfo = NULL; M4U_ASSERT(a_Param); ret = copy_from_user(&m4u_module, (void*)a_Param , sizeof(M4U_MOUDLE_STRUCT)); if(ret) { M4UERR(" MTK_M4U_T_ALLOC_MVA, copy_from_user failed: %d\n", ret); return -EFAULT; } M4ULOG("-MTK_M4U_T_REGISTER_BUF, module_id=%d, BufAddr=0x%x, BufSize=%d \r\n", m4u_module.eModuleID, m4u_module.BufAddr, m4u_module.BufSize ); pMvaInfo->bufAddr = m4u_module.BufAddr; pMvaInfo->mvaStart = 0; pMvaInfo->size = m4u_module.BufSize; pMvaInfo->eModuleId = m4u_module.eModuleID; pMvaInfo->flags = MVA_REGION_REGISTER; pMvaInfo->security = m4u_module.security; pMvaInfo->cache_coherent = m4u_module.cache_coherent; m4u_add_to_garbage_list(a_pstFile, pMvaInfo); } break; case MTK_M4U_T_CACHE_FLUSH_ALL: m4u_dma_cache_flush_all(); break; default : M4UMSG("MTK M4U ioctl : No such command!!\n"); ret = -EINVAL; break; } return ret; } static int MTK_M4U_mmap(struct file * a_pstFile, struct vm_area_struct * a_pstVMArea) { garbage_node_t * pstLog; pstLog = (garbage_node_t *)a_pstFile->private_data; if(NULL == pstLog) { M4UMSG("Private data is null in mmap operation. 
HOW COULD THIS HAPPEN ??\n");
        //return -1;
    }
    M4UMSG("MTK_M4U_Mmap, a_pstVMArea=0x%x, vm_start=0x%x, vm_pgoff=0x%x, size=0x%x, vm_page_prot=0x%x \n",
        (unsigned int)a_pstVMArea ,
        (unsigned int)a_pstVMArea->vm_start ,
        (unsigned int)a_pstVMArea->vm_pgoff ,
        (unsigned int)(a_pstVMArea->vm_end - a_pstVMArea->vm_start) ,
        (unsigned int)a_pstVMArea->vm_page_prot );
    {
        /* device memory: map non-cached so CPU and engines agree on contents */
        a_pstVMArea->vm_page_prot = pgprot_noncached(a_pstVMArea->vm_page_prot);
        if(remap_pfn_range(a_pstVMArea ,
            a_pstVMArea->vm_start ,
            a_pstVMArea->vm_pgoff ,
            (a_pstVMArea->vm_end - a_pstVMArea->vm_start) ,
            a_pstVMArea->vm_page_prot)<0)
        {
            M4UMSG("MMAP failed!!\n");
            return -1;
        }
    }
    return 0;
}

static const struct file_operations g_stMTK_M4U_fops =
{
    .owner = THIS_MODULE,
    .open = MTK_M4U_open,
    .release = MTK_M4U_release,
    .flush = MTK_M4U_flush,
    .unlocked_ioctl = MTK_M4U_ioctl,
    .mmap = MTK_M4U_mmap
};

volatile static int gM4u_L2_invalid_range_done = 0;
volatile static int gM4u_L2_invalid_all_done = 0;

/* L2 TLB interrupt: log the status register and acknowledge the interrupt. */
static irqreturn_t MTK_M4U_L2_isr(int irq, void *dev_id)
{
    unsigned int regval = COM_ReadReg32(REG_L2_GPE_STATUS);
    M4UMSG("L2 interrupt happens!!! irq=%d, status=0x%x\n", irq, regval);
    //clear interrupt
    COM_WriteReg32(REG_L2_GPE_STATUS, 0);
    return IRQ_HANDLED;
}

/* Pretty-print previously captured Re-order-buffer/RS snapshots (va/pa/status per slot). */
static int __m4u_dump_rs_info(unsigned int va[], unsigned int pa[], unsigned int st[])
{
    int i;
    M4UINFO("m4u dump RS information =====>\n");
    M4UINFO("mva valid port-id pa larb-id write other-status \n");
    for(i=0; i<MMU_TOTAL_RS_NR; i++)
    {
        M4UINFO("0x%-8x %d %-2d 0x%-8x %d %d 0x%-8x",
            F_MMU_RSx_VA_GET(va[i]), F_MMU_RSx_VA_VALID(va[i]), F_MMU_RSx_VA_PID(va[i]),
            pa[i],
            F_MMU_RSx_ST_LID(st[i]), F_MMU_RSx_ST_WRT(st[i]), F_MMU_RSx_ST_OTHER(st[i]) );
    }
    M4UINFO("m4u dump RS information done =====>\n");
    return 0;
}

/* Read the live RS registers of one M4U instance and dump them. */
static int m4u_dump_rs_info(int m4u_index)
{
    unsigned int m4u_base = gM4UBaseAddr[m4u_index];
    int i;
    unsigned int va[MMU_TOTAL_RS_NR], pa[MMU_TOTAL_RS_NR], st[MMU_TOTAL_RS_NR];
    for(i=0; i<MMU_TOTAL_RS_NR; i++)
    {
        va[i] = ioread32((void*)(m4u_base+REG_MMU_RSx_VA(i)));
        pa[i] = ioread32((void*)(m4u_base+REG_MMU_RSx_PA(i)));
        st[i] = ioread32((void*)(m4u_base+REG_MMU_RSx_ST(i)));
    }
    mb();
    __m4u_dump_rs_info(va, pa, st);
    return 0;
}

/*
 * Main M4U fault ISR. Snapshots fault status/VA/port-id and the TLB tags
 * first, then decodes each fault bit. The m4u index is derived from the IRQ
 * number (irq - MT6589_MMU0_IRQ_ID).
 */
static irqreturn_t MTK_M4U_isr(int irq, void *dev_id)
{
    unsigned int m4u_base, m4u_index;
    unsigned int IntrSrc, faultMva, port_regval, i;
    int portID, larbID;
    unsigned int main_tags[M4U_MAIN_TLB_NR];
    unsigned int pfh_tags[M4U_MAIN_TLB_NR];
    unsigned int rs_va[MMU_TOTAL_RS_NR], rs_pa[MMU_TOTAL_RS_NR], rs_st[MMU_TOTAL_RS_NR];
    m4u_index = irq-MT6589_MMU0_IRQ_ID;
    m4u_base = gM4UBaseAddr[m4u_index];
    IntrSrc = M4U_ReadReg32(m4u_base, REG_MMU_FAULT_ST) & 0xFF;
    faultMva = M4U_ReadReg32(m4u_base, REG_MMU_FAULT_VA);
    port_regval = M4U_ReadReg32(m4u_base, REG_MMU_INT_ID);
    /* capture TLB tags before any invalidation disturbs them */
    for(i=0; i<M4U_MAIN_TLB_NR; i++)
    {
        main_tags[i] = ioread32((void*)(m4u_base+REG_MMU_MAIN_TAG(i)));
        pfh_tags[i] = ioread32((void*)(m4u_base+REG_MMU_PFH_TAG(i)));
    }
    if(g_debug_dump_rs_in_isr)
    {
        for(i=0; i<MMU_TOTAL_RS_NR; i++)
        {
            rs_va[i] = ioread32((void*)(m4u_base+REG_MMU_RSx_VA(i)));
            rs_pa[i] = ioread32((void*)(m4u_base+REG_MMU_RSx_PA(i)));
            rs_st[i] =
ioread32((void*)(m4u_base+REG_MMU_RSx_ST(i)));
        }
    }
    mb();
    //m4u_disable_error_hang(m4u_index);
    if(0==IntrSrc)
    {
        /* spurious interrupt: nothing latched in the fault status register */
        M4UMSG("warning: MTK_M4U_isr, larbID=%d, but REG_MMU_FAULT_ST=0x0 \n", m4u_index);
        m4u_clear_intr(m4u_base);
        return IRQ_HANDLED;
    }
    if(IntrSrc&F_INT_TRANSLATION_FAULT)
    {
        unsigned int *faultPTE;
        M4U_PORT_ID_ENUM m4u_port;
        if(g_debug_recover_pagetable_TF)
        {
            /* debug-only recovery: point the invalid PTE at a scratch page so the
             * engine can make forward progress instead of faulting repeatedly */
            if(faultMva<M4U_MVA_MAX )
            {
                faultPTE = mva_pteAddr(faultMva);
                if(!(*faultPTE & F_DESC_VALID))
                    *faultPTE = gM4U_align_page_pa|F_DESC_VALID;
                //to-do: add secure solution
                m4u_invalid_tlb_all(m4u_index, gM4U_L2_enable);
            }
        }
        portID = F_INT_ID_TF_PORT_ID(port_regval);
        /* NOTE(review): hardware reports the larb id inverted (3 - id) — confirm against the datasheet */
        larbID = 3-F_INT_ID_TF_LARB_ID(port_regval);
        m4u_port = larb_port_2_m4u_port(larbID, portID);
        {
            MMProfileLogEx(M4U_MMP_Events[PROFILE_M4U_ERROR], MMProfileFlagPulse, m4u_port, faultMva);
            m4u_aee_print("\nCRDISPATCH_KEY:%s\ntranslation fault: larb=%d,module=%s,port=%s,mva=0x%x\n",
                m4u_get_port_name(m4u_port), larbID,
                m4u_get_module_name(m4u_port_2_module(m4u_port)), m4u_get_port_name(m4u_port), faultMva);
        }
        M4UMSG("translation fault: larb=%d,port=%s, fault_mva=0x%x\n",
            larbID, m4u_get_port_name(larb_port_2_m4u_port(larbID, portID)), faultMva);
        /* dump the PTEs around the faulting MVA (guard keeps pteStart[-1] in range) */
        if(faultMva<M4U_MVA_MAX-0x1000 && faultMva>0x40000)
        {
            unsigned int *pteStart;
            pteStart = mva_pteAddr(faultMva);
            M4UINFO("pagetable @ 0x%x: 0x%x,0x%x,0x%x\n",faultMva,pteStart[-1], pteStart[0],pteStart[1]);
        }
        m4u_dump_user_addr_register(m4u_port);
        //search invalid main tlb
        {
            unsigned int mva;// des;
            M4UINFO("search main tlb=>\n");
            for(i=0;i<M4U_MAIN_TLB_NR;i++)
            {
                mva = main_tags[i];
                if((mva&(F_MAIN_TLB_VALID_BIT|F_MAIN_TLB_INV_DES_BIT)) == (F_MAIN_TLB_VALID_BIT|F_MAIN_TLB_INV_DES_BIT) )
                {
                    unsigned int des = m4u_get_main_descriptor(m4u_base,i);
                    printk(KERN_INFO"%d:0x%x, 0x%x", i,mva&F_MAIN_TLB_VA_MSK, des);
                }
            }
            //printk(KERN_INFO"\n");
        }
        //search invalid pfh tlb
        {
            unsigned int tag,tag_valid,des_invalid;
            M4UINFO("search pfh tlb=>\n");
            for(i=0;i<M4U_MAIN_TLB_NR;i++)
            {
                tag = pfh_tags[i];
                tag_valid = F_PFH_TAG_VALID(tag);
                des_invalid = F_PFH_TAG_DESC_VALID(tag);
                if((tag_valid & des_invalid)!=0)
                {
                    printk(KERN_INFO"%d:0x%x, 0x%x 0x%x 0x%x 0x%x", i,tag,
                        m4u_get_pfh_descriptor(m4u_base,i,0),
                        m4u_get_pfh_descriptor(m4u_base,i,1),
                        m4u_get_pfh_descriptor(m4u_base,i,2),
                        m4u_get_pfh_descriptor(m4u_base,i,3));
                }
            }
            //printk(KERN_INFO"\n");
        }
        m4u_dump_main_tlb_des(m4u_index);
        m4u_dump_pfh_tlb_des(m4u_index);
        if(g_debug_dump_rs_in_isr)
            __m4u_dump_rs_info(rs_va, rs_pa, rs_st);
    }
    if(IntrSrc&F_INT_TLB_MULTI_HIT_FAULT)
    {
        m4u_dump_main_tlb_des(m4u_index);
        m4u_dump_pfh_tlb_des(m4u_index);
        M4UERR("multi-hit error! \n");
    }
    if(IntrSrc&F_INT_INVALID_PHYSICAL_ADDRESS_FAULT)
    {
        /* full dump only when this is not a side effect of a translation fault */
        if(!(IntrSrc&F_INT_TRANSLATION_FAULT))
        {
            if(faultMva<M4U_MVA_MAX-0x1000 && faultMva>0x40000)
            {
                unsigned int *pteStart;
                pteStart = mva_pteAddr(faultMva);
                M4UINFO("pagetable @ 0x%x: 0x%x,0x%x,0x%x\n",faultMva,pteStart[-1], pteStart[0],pteStart[1]);
            }
            m4u_aee_print("invalid PA:0x%x->0x%x\n", faultMva, M4U_ReadReg32(m4u_base, REG_MMU_INVLD_PA));
            m4u_dump_main_tlb_des(m4u_index);
            m4u_dump_pfh_tlb_des(m4u_index);
        }
        else
        {
            M4UMSG("invalid PA:0x%x->0x%x\n", faultMva, M4U_ReadReg32(m4u_base, REG_MMU_INVLD_PA));
        }
    }
    if(IntrSrc&F_INT_ENTRY_REPLACEMENT_FAULT)
    {
        /* count locked TLB entries per client module to show who exhausted the TLB */
        unsigned char lock_cnt[M4U_CLNTMOD_MAX] = {0};
        M4UERR("error: Entry replacement fault! No free TLB, TLB are locked by: ");
        for(i=0;i<g4M4UTagCount[m4u_index];i++)
        {
            lock_cnt[mva2module(M4U_ReadReg32(m4u_base, REG_MMU_MAIN_TAG(i))&(~0xfff))]++;
        }
        for(i=0;i<M4U_CLNTMOD_MAX;i++)
        {
            if(0!=lock_cnt[i])
            {
                printk("%s(lock=%d), ", m4u_get_module_name(i), lock_cnt[i]);
            }
        }
        printk("\n");
    }
    if(IntrSrc&F_INT_TABLE_WALK_FAULT)
    {
        M4UERR("error: Table walk fault! pageTable start addr:0x%x, 0x%x\n", pt_pa_nonsec, pt_pa_sec);
    }
    if(IntrSrc&F_INT_TLB_MISS_FAULT)
    {
        M4UERR("error: TLB miss fault! \n");
    }
    if(IntrSrc&F_INT_PFH_DMA_FIFO_OVERFLOW)
    {
        M4UERR("error: Prefetch DMA fifo overflow fault! \n");
    }
    m4u_print_active_port(m4u_index);
    if(g_debug_print_detail_in_isr)
        m4u_dump_mva_info();
    /* flush everything so stale entries cannot immediately re-fault */
    m4u_invalid_tlb_all(m4u_index, gM4U_L2_enable);
    m4u_clear_intr(m4u_base);
    if(g_debug_enable_error_hang)
        m4u_enable_error_hang(m4u_index);
    return IRQ_HANDLED;
}

/* SMI register init placeholder (nothing to do on this platform). */
static unsigned int SMI_reg_init(void)
{
    return 0;
}

//static struct class *pM4uClass = NULL;
//static struct device* m4uDevice = NULL;

/* Probe: register /proc entry, allocate bookkeeping arrays, init structures, hook IRQs. */
static int m4u_probe(struct platform_device *pdev)
{
    struct proc_dir_entry *m4u_entry;
    M4UMSG("MTK_M4U_Init\n");
    /*
    int ret;
    ret = register_chrdev_region(g_MTKM4Udevno, 1, M4U_DEVNAME);
    if(ret)
        M4UMSG("error: can't get major number for m4u device\n");
    else
        M4UMSG("Get M4U Device Major number (%d)\n", ret);
    g_pMTKM4U_CharDrv = cdev_alloc();
    g_pMTKM4U_CharDrv->owner = THIS_MODULE;
    g_pMTKM4U_CharDrv->ops = &g_stMTK_M4U_fops;
    ret = cdev_add(g_pMTKM4U_CharDrv, g_MTKM4Udevno, 1);
    //create /dev/M4U_device automaticly
    pM4uClass = class_create(THIS_MODULE, M4U_DEVNAME);
    if (IS_ERR(pM4uClass))
    {
        int ret = PTR_ERR(pM4uClass);
        M4UMSG("Unable to create class, err = %d", ret);
        return ret;
    }
    m4uDevice = device_create(pM4uClass, NULL, g_MTKM4Udevno, NULL, M4U_DEVNAME);
    */
    m4u_entry = proc_create("M4U_device", 0, NULL, &g_stMTK_M4U_fops);
    if(!m4u_entry)
    {
        M4UMSG("m4u:failed to register m4u in proc/M4U_device.\n");
        return -ENODEV;
    }
    /* NOTE(review): these kmalloc results are used unchecked elsewhere — verify OOM handling */
    pmodule_current_size = (int*)kmalloc(M4U_CLIENT_MODULE_NUM*4, GFP_KERNEL|__GFP_ZERO);
    pmodule_max_size = (int*)kmalloc(M4U_CLIENT_MODULE_NUM*4, GFP_KERNEL|__GFP_ZERO);
    pmodule_locked_pages = (int*)kmalloc(M4U_CLIENT_MODULE_NUM*4, GFP_KERNEL|__GFP_ZERO);
    spin_lock_init(&gM4u_reg_lock);
    m4u_struct_init(); //init related structures
    m4u_mvaGraph_init();
    // add SMI reg init here
    SMI_reg_init();
    //Set IRQ
    if(request_irq(MT6589_MMU0_IRQ_ID , MTK_M4U_isr, IRQF_TRIGGER_LOW, M4U_DEVNAME , NULL))
    {
        M4UERR("request M4U0 IRQ line failed\n");
        return -ENODEV;
    }
    if(request_irq(MT6589_MMU1_IRQ_ID , MTK_M4U_isr, IRQF_TRIGGER_LOW, M4U_DEVNAME , NULL))
    {
        M4UERR("request M4U1 IRQ line 
failed\n"); return -ENODEV; } if(request_irq(MT6589_MMU_L2_IRQ_ID , MTK_M4U_L2_isr, IRQF_TRIGGER_LOW, M4U_DEVNAME , NULL)) { M4UERR("request M4U2 IRQ line failed\n"); return -ENODEV; } disable_irq(MT6589_MMU_L2_IRQ_ID); if(request_irq(MT6589_MMU_L2_SEC_IRQ_ID , MTK_M4U_L2_isr, IRQF_TRIGGER_LOW, M4U_DEVNAME , NULL)) { M4UERR("request M4U2 IRQ line failed\n"); return -ENODEV; } disable_irq(MT6589_MMU_L2_SEC_IRQ_ID); M4UMSG("init done\n"); m4u_profile_init(); m4u_cache_sync_init(); return 0; } static int m4u_remove(struct platform_device *pdev) { M4UDBG("MT6577_M4U_Exit() \n"); cdev_del(g_pMTKM4U_CharDrv); unregister_chrdev_region(g_MTKM4Udevno, 1); //Release IRQ free_irq(MT6589_MMU0_IRQ_ID , NULL); free_irq(MT6589_MMU1_IRQ_ID , NULL); free_irq(MT6589_MMU_L2_IRQ_ID , NULL); return 0; } static int m4u_confirm_range_invalidated(int m4u_index, unsigned int MVAStart, unsigned int MVAEnd) { unsigned int i; unsigned int regval; unsigned int m4u_base = gM4UBaseAddr[m4u_index]; int result = 0; M4ULOG("m4u_confirm_range_invalidated, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x \n", m4u_index, MVAStart, MVAEnd); if(gTestLevel==M4U_TEST_LEVEL_USER) { return 0; } ///> check Main TLB part for(i=0;i<g4M4UTagCount[m4u_index];i++) { regval = M4U_ReadReg32(m4u_base, REG_MMU_MAIN_TAG(i)); if(regval & (F_MAIN_TLB_VALID_BIT)) { unsigned int mva = regval & F_MAIN_TLB_VA_MSK; unsigned int sa_align = MVAStart & F_MAIN_TLB_VA_MSK; unsigned int ea_align = MVAEnd & F_MAIN_TLB_VA_MSK; if(mva>=sa_align && mva<=ea_align) { if(gTestLevel==M4U_TEST_LEVEL_STRESS) { M4UERR("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n", i, m4u_index, MVAStart, MVAEnd, regval); m4u_dump_reg(m4u_index); } else if(gTestLevel==M4U_TEST_LEVEL_ENG) { M4UMSG("main: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x \n", i, m4u_index, MVAStart, MVAEnd, regval); } result = -1; } } } if(result < 0) return result; ///> check Prefetch TLB part for(i=0;i<g4M4UTagCount[m4u_index];i++) { regval = 
M4U_ReadReg32(m4u_base, REG_MMU_PFH_TAG(i));
        if(regval & F_PFH_TAG_VALID_MSK)   ///> a valid Prefetch TLB entry
        {
            unsigned int mva = regval & F_PFH_TAG_VA_MSK;
            unsigned int sa_align = MVAStart& F_PFH_TAG_VA_MSK;
            unsigned int ea_align = MVAEnd & F_PFH_TAG_VA_MSK;
            if(mva>=sa_align && mva<=ea_align)
            {
                if(gTestLevel==M4U_TEST_LEVEL_STRESS)
                {
                    M4UERR("prefetch: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
                        i, m4u_index, MVAStart, MVAEnd, regval);
                    m4u_dump_reg(m4u_index);
                }
                else if(gTestLevel==M4U_TEST_LEVEL_ENG)
                {
                    M4UMSG("prefetch: i=%d, idx=0x%x, MVAStart=0x%x, MVAEnd=0x%x, RegValue=0x%x\n",
                        i, m4u_index, MVAStart, MVAEnd, regval);
                }
                result = -1;
            }
        }
    }
    return result;
}

/**
 * @brief Read the descriptor of one main-TLB entry via the indirect
 *        REG_MMU_READ_ENTRY/REG_MMU_DES_RDATA register pair.
 * @param m4u_base  M4U register base address
 * @param idx       main-TLB entry index
 * @return raw descriptor word
 */
static unsigned int m4u_get_main_descriptor(unsigned int m4u_base, unsigned int idx)
{
    unsigned int regValue=0;
    regValue = F_READ_ENTRY_TLB_SEL_MAIN \
        | F_READ_ENTRY_INDEX_VAL(idx)\
        | F_READ_ENTRY_READ_EN_BIT;
    M4U_WriteReg32(m4u_base, REG_MMU_READ_ENTRY, regValue);
    return M4U_ReadReg32(m4u_base, REG_MMU_DES_RDATA);
}

/**
 * @brief Read one of the four descriptor words of a prefetch-TLB entry
 *        (a prefetch entry caches 4 consecutive descriptors).
 * @param m4u_base   M4U register base address
 * @param tlbIndex   prefetch-TLB entry index
 * @param tlbSelect  which of the 4 descriptor slots to read (0..3)
 * @return raw descriptor word
 */
static unsigned int m4u_get_pfh_descriptor(unsigned int m4u_base, int tlbIndex, int tlbSelect)
{
    unsigned regValue=0;
    M4U_ASSERT(tlbSelect<4);
    regValue = F_READ_ENTRY_TLB_SEL_PFH \
        | F_READ_ENTRY_INDEX_VAL(tlbIndex)\
        | F_READ_ENTRY_PFH_IDX(tlbSelect)\
        | F_READ_ENTRY_READ_EN_BIT;
    M4U_WriteReg32(m4u_base, REG_MMU_READ_ENTRY, regValue);
    return M4U_ReadReg32(m4u_base, REG_MMU_DES_RDATA);
}

/* Scan the main TLB and print every entry whose tag has both the VALID
 * and the INV_DES (invalid-descriptor) bits set.  Debug aid only. */
static int m4u_search_main_invalid(int m4u_id)
{
    unsigned int i=0;
    unsigned int m4u_base = gM4UBaseAddr[m4u_id];
    unsigned int mva;// des;
    M4UINFO("search main tlb=>\n");
    for(i=0;i<M4U_MAIN_TLB_NR;i++)
    {
        mva = M4U_ReadReg32(m4u_base, REG_MMU_MAIN_TAG(i));
        if((mva&(F_MAIN_TLB_VALID_BIT|F_MAIN_TLB_INV_DES_BIT))
            == (F_MAIN_TLB_VALID_BIT|F_MAIN_TLB_INV_DES_BIT) )
        {
            //des = m4u_get_main_descriptor(m4u_base,i);
            printk(KERN_INFO"%d:0x%x ", i,mva);
        }
    }
    return 0;
}

static int m4u_dump_main_tlb_des(int
m4u_id) { // M4U related unsigned int i=0; unsigned int m4u_base = gM4UBaseAddr[m4u_id]; M4UINFO("dump main tlb=======>\n"); for(i=0;i<M4U_MAIN_TLB_NR;i++) { printk(KERN_INFO"%d:0x%x:0x%x ", i, M4U_ReadReg32(m4u_base, REG_MMU_MAIN_TAG(i)), m4u_get_main_descriptor(m4u_base,i)); if((i+1)%8==0) printk(KERN_INFO"\n"); } return 0; } static int m4u_dump_main_tlb_tags(int m4u_id) { // M4U related unsigned int i=0; unsigned int m4u_base = gM4UBaseAddr[m4u_id]; M4UINFO("dump main tlb=======>\n"); for(i=0;i<M4U_MAIN_TLB_NR;i++) { M4UINFO("0x%x ", M4U_ReadReg32(m4u_base, REG_MMU_MAIN_TAG(i))); if((i+1)%8==0) printk(KERN_INFO"\n"); } return 0; } static int m4u_dump_pfh_tlb_tags(int m4u_id) { unsigned int i=0; unsigned int m4u_base = gM4UBaseAddr[m4u_id]; M4UINFO("dump pfh tags=======>\n"); for(i=0;i<M4U_PRE_TLB_NR;i++) { printk(KERN_INFO"0x%x ", M4U_ReadReg32(m4u_base, REG_MMU_PFH_TAG(i))); if((i+1)%8==0) printk(KERN_INFO"\n"); } printk(KERN_INFO"\n"); return 0; } static int m4u_dump_pfh_tlb_des(int m4u_id) { // M4U related unsigned int i=0; unsigned int m4u_base = gM4UBaseAddr[m4u_id]; M4UINFO("dump main tlb=======>\n"); for(i=0;i<M4U_PRE_TLB_NR;i++) { printk(KERN_INFO"%d:0x%x:0x%x,0x%x,0x%x,0x%x ", i, M4U_ReadReg32(m4u_base, REG_MMU_PFH_TAG(i)), m4u_get_pfh_descriptor(m4u_base,i, 0), m4u_get_pfh_descriptor(m4u_base,i, 1), m4u_get_pfh_descriptor(m4u_base,i, 2), m4u_get_pfh_descriptor(m4u_base,i, 3) ); if((i+1)%4==0) printk(KERN_INFO"\n"); } return 0; } static void m4u_dump_pagetable_range(unsigned int mvaStart, unsigned int nr) { unsigned int *pteStart; int i; pteStart = mva_pteAddr(mvaStart); mvaStart &= ~0xfff; // printk("m4u dump pagetable by range: start=0x%x, nr=%d ==============>\n", vaStart, nr); // printk("index : mva : PTE\n"); printk(KERN_INFO"\n 0x%08x: ", mvaStart); for(i=0; i<nr; i++) { printk(KERN_INFO"0x%08x, ", pteStart[i]); if((i+1)%8==0) { printk(KERN_INFO"\n 0x%08x: ", (mvaStart+((i+1)<<12))); } } printk(KERN_INFO"\n"); // printk("m4u dump pagetable done : 
start=0x%x, nr=%d ==============<\n", vaStart, nr);
}

/* Dump every pagetable range currently owned by eModuleID.  May run in
 * ISR context (uses spin_lock_irqsave, no sleeping calls). */
static int m4u_dump_pagetable(M4U_MODULE_ID_ENUM eModuleID)
{
    unsigned int addr=0;
    short index=1, nr=0;
    unsigned long irq_flags;
    printk("[M4U_K] dump pagetable by module: %s, page_num=%d ========>\n",
        m4u_get_module_name(eModuleID), pmodule_locked_pages[eModuleID]);
    // this function may be called in ISR
    spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
    /* walk the block graph; nr is the span of each region */
    for(index=1; index<MVA_MAX_BLOCK_NR+1; index += nr)
    {
        addr = index << MVA_BLOCK_SIZE_ORDER;
        nr = MVA_GET_NR(index);
        if(MVA_IS_BUSY(index) && (mvaInfoGraph[index]->eModuleId) == eModuleID)
        {
            // printk("start a mva region for module %d===>\n", eModuleID);
            m4u_dump_pagetable_range(addr, ((nr<<MVA_BLOCK_SIZE_ORDER)>>12));
        }
    }
    spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
    printk("[M4U_K] dump pagetable by module done =========================<\n");
    return 0;
}

/* Print one line per MVA region (busy or free) of the allocator graph.
 * NOTE(review): declared with empty parens; (void) would be stricter C. */
static int m4u_dump_mva_info()
{
    short index=1, nr=0;
    unsigned int addr=0;
    mva_info_t *pMvaInfo;
    unsigned long irq_flags;
    spin_lock_irqsave(&gMvaGraph_lock, irq_flags);
    M4UINFO(" dump mva allocated info ========>\n");
    M4UINFO("mva_start mva_end va size block_end module block_num sec snoop \n");
    for(index=1; index<MVA_MAX_BLOCK_NR+1; index += nr)
    {
        addr = index << MVA_BLOCK_SIZE_ORDER;
        nr = MVA_GET_NR(index);
        if(MVA_IS_BUSY(index))
        {
            pMvaInfo = mvaInfoGraph[index];
            M4UINFO("0x%-8x, 0x%-8x, 0x%-8x, 0x%-8x, 0x%-8x, %s, %d, %d, %d\n",
                pMvaInfo->mvaStart, pMvaInfo->mvaStart+pMvaInfo->size-1,
                pMvaInfo->bufAddr, pMvaInfo->size, addr+nr*MVA_BLOCK_SIZE,
                m4u_get_module_name(pMvaInfo->eModuleId), nr,
                pMvaInfo->security, pMvaInfo->cache_coherent);
        }
        else
        {
            M4UINFO("%s, 0x%-8x, 0x%-8x, %d\n",
                "free", addr, addr+nr*MVA_BLOCK_SIZE, nr);
        }
    }
    M4UINFO(" dump mva allocated info done ========>\n");
    spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags);
    return 0;
}

/* Enable the M4U and SMI infra clocks. */
static int m4u_clock_on(void)
{
    enable_clock(MT_CG_INFRA_M4U, "m4u");
    enable_clock(MT_CG_INFRA_SMI, "m4u");
    return 0;
}

static int m4u_clock_off(void)
{
disable_clock(MT_CG_INFRA_M4U, "m4u"); disable_clock(MT_CG_INFRA_SMI, "m4u"); return 0; } static void m4u_mvaGraph_init(void) { unsigned long irq_flags; spin_lock_irqsave(&gMvaGraph_lock, irq_flags); memset(mvaGraph, 0, sizeof(short)*(MVA_MAX_BLOCK_NR+1)); memset(mvaInfoGraph, 0, sizeof(mva_info_t*)*(MVA_MAX_BLOCK_NR+1)); mvaGraph[0] = 1|MVA_BUSY_MASK; mvaInfoGraph[0] = &gMvaNode_unkown; mvaGraph[1] = MVA_MAX_BLOCK_NR; mvaInfoGraph[1] = &gMvaNode_unkown; mvaGraph[MVA_MAX_BLOCK_NR] = MVA_MAX_BLOCK_NR; mvaInfoGraph[MVA_MAX_BLOCK_NR] = &gMvaNode_unkown; spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags); } static void m4u_mvaGraph_dump_raw(void) { int i; unsigned long irq_flags; spin_lock_irqsave(&gMvaGraph_lock, irq_flags); printk("[M4U_K] dump raw data of mvaGraph:============>\n"); for(i=0; i<MVA_MAX_BLOCK_NR+1; i++) printk("0x%4x: 0x%08x ID:%d\n", i, mvaGraph[i], mvaInfoGraph[i]->eModuleId); spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags); } static void m4u_mvaGraph_dump(void) { unsigned int addr=0, size=0; short index=1, nr=0; M4U_MODULE_ID_ENUM moduleID; char *pMvaFree = "FREE"; char *pErrorId = "ERROR"; char *pOwner = NULL; int i,max_bit; short frag[12] = {0}; short nr_free=0, nr_alloc=0; unsigned long irq_flags; printk("[M4U_K] mva allocation info dump:====================>\n"); printk("start size blocknum owner \n"); spin_lock_irqsave(&gMvaGraph_lock, irq_flags); for(index=1; index<MVA_MAX_BLOCK_NR+1; index += nr) { addr = index << MVA_BLOCK_SIZE_ORDER; nr = MVA_GET_NR(index); size = nr << MVA_BLOCK_SIZE_ORDER; if(MVA_IS_BUSY(index)) { moduleID = mvaInfoGraph[index]->eModuleId; if(moduleID > M4U_CLIENT_MODULE_NUM-1) pOwner = pErrorId; else pOwner = m4u_get_module_name(moduleID); nr_alloc += nr; } else // mva region is free { pOwner = pMvaFree; nr_free += nr; max_bit=0; for(i=0; i<12; i++) { if(nr & (1<<i)) max_bit = i; } frag[max_bit]++; } printk("0x%08x 0x%08x %4d %s\n", addr, size, nr, pOwner); } spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags); 
printk("\n"); printk("[M4U_K] mva alloc summary: (unit: blocks)========================>\n"); printk("free: %d , alloc: %d, total: %d \n", nr_free, nr_alloc, nr_free+nr_alloc); printk("[M4U_K] free region fragments in 2^x blocks unit:===============\n"); printk(" 0 1 2 3 4 5 6 7 8 9 10 11 \n"); printk("%4d %4d %4d %4d %4d %4d %4d %4d %4d %4d %4d %4d \n", frag[0],frag[1],frag[2],frag[3],frag[4],frag[5],frag[6],frag[7],frag[8],frag[9],frag[10],frag[11]); printk("[M4U_K] mva alloc dump done=========================<\n"); } M4U_MODULE_ID_ENUM mva2module(unsigned int mva) { M4U_MODULE_ID_ENUM eModuleId = M4U_PORT_UNKNOWN; int index; unsigned long irq_flags; index = MVAGRAPH_INDEX(mva); if(index==0 || index>MVA_MAX_BLOCK_NR) { M4UMSG("mvaGraph index is 0. mva=0x%x\n", mva); return M4U_PORT_UNKNOWN; } spin_lock_irqsave(&gMvaGraph_lock, irq_flags); //find prev head/tail of this region while(mvaGraph[index]==0) index--; if(MVA_IS_BUSY(index)) { eModuleId = mvaInfoGraph[index]->eModuleId; goto out; } else { eModuleId = M4U_PORT_UNKNOWN; goto out; } out: spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags); return eModuleId; } static unsigned int m4u_do_mva_alloc(M4U_MODULE_ID_ENUM eModuleID, const unsigned int BufAddr, const unsigned int BufSize, mva_info_t *pMvaInfo) { short s,end; short new_start, new_end; short nr = 0; unsigned int mvaRegionStart; unsigned int startRequire, endRequire, sizeRequire; unsigned long irq_flags; if(BufSize == 0) return 0; MMProfileLogEx(M4U_MMP_Events[PROFILE_ALLOC_MVA_REGION], MMProfileFlagStart, eModuleID, BufAddr); ///----------------------------------------------------- ///calculate mva block number startRequire = BufAddr & (~M4U_PAGE_MASK); endRequire = (BufAddr+BufSize-1)| M4U_PAGE_MASK; sizeRequire = endRequire-startRequire+1; nr = (sizeRequire+MVA_BLOCK_ALIGN_MASK)>>MVA_BLOCK_SIZE_ORDER;//(sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0); spin_lock_irqsave(&gMvaGraph_lock, irq_flags); 
///----------------------------------------------- ///find first match free region for(s=1; (s<(MVA_MAX_BLOCK_NR+1))&&(mvaGraph[s]<nr); s+=(mvaGraph[s]&MVA_BLOCK_NR_MASK)) ; if(s > MVA_MAX_BLOCK_NR) { spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags); M4UMSG("mva_alloc error: no available MVA region for %d blocks!\n", nr); return 0; } ///----------------------------------------------- ///alloc a mva region end = s + mvaGraph[s] - 1; if(unlikely(nr == mvaGraph[s])) { MVA_SET_BUSY(s); MVA_SET_BUSY(end); mvaInfoGraph[s] = pMvaInfo; mvaInfoGraph[end] = pMvaInfo; } else { new_end = s + nr - 1; new_start = new_end + 1; //note: new_start may equals to end mvaGraph[new_start] = (mvaGraph[s]-nr); mvaGraph[new_end] = nr | MVA_BUSY_MASK; mvaGraph[s] = mvaGraph[new_end]; mvaGraph[end] = mvaGraph[new_start]; mvaInfoGraph[s] = pMvaInfo; mvaInfoGraph[new_end] = pMvaInfo; } spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags); mvaRegionStart = (unsigned int)s; MMProfileLogEx(M4U_MMP_Events[PROFILE_ALLOC_MVA_REGION], MMProfileFlagEnd, eModuleID, BufSize); return (mvaRegionStart<<MVA_BLOCK_SIZE_ORDER) + mva_pageOffset(BufAddr); } #define RightWrong(x) ( (x) ? 
"correct" : "error") static int m4u_do_mva_free(M4U_MODULE_ID_ENUM eModuleID, const unsigned int BufAddr, const unsigned int BufSize, unsigned int mvaRegionStart) { short startIdx = mvaRegionStart >> MVA_BLOCK_SIZE_ORDER; short nr = mvaGraph[startIdx] & MVA_BLOCK_NR_MASK; short endIdx = startIdx + nr - 1; unsigned int startRequire, endRequire, sizeRequire; short nrRequire; mva_info_t * pMvaInfo = NULL; unsigned long irq_flags; spin_lock_irqsave(&gMvaGraph_lock, irq_flags); ///-------------------------------- ///check the input arguments ///right condition: startIdx is not NULL && region is busy && right module && right size startRequire = BufAddr & (~M4U_PAGE_MASK); endRequire = (BufAddr+BufSize-1)| M4U_PAGE_MASK; sizeRequire = endRequire-startRequire+1; nrRequire = (sizeRequire+MVA_BLOCK_ALIGN_MASK)>>MVA_BLOCK_SIZE_ORDER;//(sizeRequire>>MVA_BLOCK_SIZE_ORDER) + ((sizeRequire&MVA_BLOCK_ALIGN_MASK)!=0); if(!( startIdx != 0 //startIdx is not NULL && MVA_IS_BUSY(startIdx) // region is busy && (mvaInfoGraph[startIdx]->eModuleId==eModuleID) //right module && (nr==nrRequire) //right size ) ) { spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags); m4u_aee_print("free mva error, larb=%d, module=%s\n", m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID)); M4UMSG("error to free mva========================>\n"); M4UMSG("ModuleID=%s (expect %s) [%s]\n", m4u_get_module_name(eModuleID), m4u_get_module_name(mvaInfoGraph[startIdx]->eModuleId), RightWrong(eModuleID==(mvaInfoGraph[startIdx]->eModuleId))); M4UMSG("BufSize=%d(unit:0x%xBytes) (expect %d) [%s]\n", nrRequire, MVA_BLOCK_SIZE, nr, RightWrong(nrRequire==nr)); M4UMSG("mva=0x%x, (IsBusy?)=%d (expect %d) [%s]\n", mvaRegionStart, MVA_IS_BUSY(startIdx),1, RightWrong(MVA_IS_BUSY(startIdx))); m4u_mvaGraph_dump(); //m4u_mvaGraph_dump_raw(); return -1; } pMvaInfo = mvaInfoGraph[startIdx]; mvaInfoGraph[startIdx] = NULL; mvaInfoGraph[endIdx] = NULL; ///-------------------------------- ///merge with followed region if( (endIdx+1 
<= MVA_MAX_BLOCK_NR)&&(!MVA_IS_BUSY(endIdx+1))) { nr += mvaGraph[endIdx+1]; mvaGraph[endIdx] = 0; mvaGraph[endIdx+1] = 0; } ///-------------------------------- ///merge with previous region if( (startIdx-1>0)&&(!MVA_IS_BUSY(startIdx-1)) ) { int pre_nr = mvaGraph[startIdx-1]; mvaGraph[startIdx] = 0; mvaGraph[startIdx-1] = 0; startIdx -= pre_nr; nr += pre_nr; } ///-------------------------------- ///set region flags mvaGraph[startIdx] = nr; mvaGraph[startIdx+nr-1] = nr; spin_unlock_irqrestore(&gMvaGraph_lock, irq_flags); if(pMvaInfo!=NULL) m4u_free_garbage_list(pMvaInfo); return 0; } static void m4u_profile_init(void) { MMP_Event M4U_Event; M4U_Event = MMProfileRegisterEvent(MMP_RootEvent, "M4U"); M4U_MMP_Events[PROFILE_ALLOC_MVA] = MMProfileRegisterEvent(M4U_Event, "Alloc MVA"); M4U_MMP_Events[PROFILE_ALLOC_MVA_REGION] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_ALLOC_MVA], "Alloc MVA Region"); M4U_MMP_Events[PROFILE_GET_PAGES] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_ALLOC_MVA], "Get Pages"); M4U_MMP_Events[PROFILE_FOLLOW_PAGE] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_GET_PAGES], "Follow Page"); M4U_MMP_Events[PROFILE_FORCE_PAGING] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_GET_PAGES], "Force Paging"); M4U_MMP_Events[PROFILE_MLOCK] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_GET_PAGES], "MLock"); M4U_MMP_Events[PROFILE_ALLOC_FLUSH_TLB] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_ALLOC_MVA], "Alloc Flush TLB"); M4U_MMP_Events[PROFILE_DEALLOC_MVA] = MMProfileRegisterEvent(M4U_Event, "DeAlloc MVA"); M4U_MMP_Events[PROFILE_RELEASE_PAGES] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_DEALLOC_MVA], "Release Pages"); M4U_MMP_Events[PROFILE_MUNLOCK] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_RELEASE_PAGES], "MUnLock"); M4U_MMP_Events[PROFILE_PUT_PAGE] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_RELEASE_PAGES], "Put Page"); M4U_MMP_Events[PROFILE_RELEASE_MVA_REGION] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_DEALLOC_MVA], 
"Release MVA Region"); M4U_MMP_Events[PROFILE_QUERY] = MMProfileRegisterEvent(M4U_Event, "Query MVA"); M4U_MMP_Events[PROFILE_INSERT_TLB] = MMProfileRegisterEvent(M4U_Event, "Insert TLB"); M4U_MMP_Events[PROFILE_DMA_MAINT_ALL] = MMProfileRegisterEvent(M4U_Event, "Cache Maintain"); M4U_MMP_Events[PROFILE_DMA_CLEAN_RANGE] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], "Clean Range"); M4U_MMP_Events[PROFILE_DMA_CLEAN_ALL] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], "Clean All"); M4U_MMP_Events[PROFILE_DMA_INVALID_RANGE] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], "Invalid Range"); M4U_MMP_Events[PROFILE_DMA_INVALID_ALL] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], "Invalid All"); M4U_MMP_Events[PROFILE_DMA_FLUSH_RANGE] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], "Flush Range"); M4U_MMP_Events[PROFILE_DMA_FLUSH_ALL] = MMProfileRegisterEvent(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], "Flush All"); M4U_MMP_Events[PROFILE_CACHE_FLUSH_ALL] = MMProfileRegisterEvent(M4U_Event, "Cache Flush All"); M4U_MMP_Events[PROFILE_CONFIG_PORT] = MMProfileRegisterEvent(M4U_Event, "Config Port"); M4U_MMP_Events[PROFILE_MAIN_TLB_MON] = MMProfileRegisterEvent(M4U_Event, "Main TLB Monitor"); M4U_MMP_Events[PROFILE_PREF_TLB_MON] = MMProfileRegisterEvent(M4U_Event, "PreFetch TLB Monitor"); M4U_MMP_Events[PROFILE_M4U_REG] = MMProfileRegisterEvent(M4U_Event, "M4U Registers"); M4U_MMP_Events[PROFILE_M4U_ERROR] = MMProfileRegisterEvent(M4U_Event, "M4U ERROR"); MMProfileEnableEvent(M4U_MMP_Events[PROFILE_M4U_ERROR], 1); MMProfileEnableEvent(M4U_MMP_Events[PROFILE_ALLOC_MVA], 1); MMProfileEnableEvent(M4U_MMP_Events[PROFILE_DEALLOC_MVA], 1); MMProfileEnableEvent(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], 1); } // query mva by va int __m4u_query_mva(M4U_MODULE_ID_ENUM eModuleID, const unsigned int BufAddr, const unsigned int BufSize, unsigned int *pRetMVABuf, struct file * a_pstFile) { struct list_head *pListHead; 
mva_info_t *pList = NULL; garbage_node_t *pNode = (garbage_node_t*)(a_pstFile->private_data); unsigned int query_start = BufAddr; unsigned int query_end = BufAddr + BufSize - 1; unsigned int s,e; int ret, err = 0; *pRetMVABuf = 0; if(pNode==NULL) { M4UMSG("error: m4u_query_mva, pNode is NULL, va=0x%x, module=%s! \n", BufAddr, m4u_get_module_name(eModuleID)); return -1; } MMProfileLogEx(M4U_MMP_Events[PROFILE_QUERY], MMProfileFlagStart, eModuleID, BufAddr); mutex_lock(&(pNode->dataMutex)); list_for_each(pListHead, &(pNode->mvaList)) { pList = container_of(pListHead, mva_info_t, link); s = pList->bufAddr; e = s + pList->size - 1; if((pList->eModuleId==eModuleID) && (query_start>=s && query_end<=e)) { if(pList->mvaStart > 0) //here we have allocated mva for this buffer { *pRetMVABuf = pList->mvaStart + (query_start-s); } else // here we have not allocated mva (this buffer is registered, and query for first time) { mva_info_t *pMvaInfo; M4U_ASSERT(pList->flags&MVA_REGION_REGISTER); //we should allocate mva for this buffer //allocate another mva_info node for allocate mva //because allocate mva function will free the list if failed !!! 
pMvaInfo = m4u_alloc_garbage_list(pList->mvaStart, pList->size, pList->eModuleId, pList->bufAddr, pList->flags, pList->security, pList->cache_coherent); ret = __m4u_alloc_mva(pMvaInfo, NULL); if(ret) { M4UMSG("m4u_alloc_mva failed when query for it: %d\n", ret); err = -EFAULT; } else { pList->flags &= ~(MVA_REGION_REGISTER); pList->mvaStart = pMvaInfo->mvaStart; *pRetMVABuf = pList->mvaStart + (query_start-s); } M4ULOG("allocate for first query: id=%s, addr=0x%08x, size=%d, mva=0x%x \n", m4u_get_module_name(eModuleID), BufAddr, BufSize, *pRetMVABuf); } break; } } mutex_unlock(&(pNode->dataMutex)); MMProfileLogEx(M4U_MMP_Events[PROFILE_QUERY], MMProfileFlagEnd, eModuleID, BufSize); M4ULOG("m4u_query_mva: id=%s, addr=0x%08x, size=%d, mva=0x%x \n", m4u_get_module_name(eModuleID), BufAddr, BufSize, *pRetMVABuf); return err; } #define TVC_MVA_SAFE_MARGIN 0 //(4*1024*50) static int m4u_invalidate_and_check(unsigned int m4u_index, unsigned int start, unsigned int end) { m4u_invalid_tlb_by_range(m4u_index, gM4U_L2_enable, start, end); if(0!=m4u_confirm_range_invalidated(m4u_index, start, end)) // first time fail, invalidate range again { m4u_invalid_tlb_by_range(m4u_index, gM4U_L2_enable, start, end); if(0!=m4u_confirm_range_invalidated(m4u_index, start, end)) // again failed, invalidate all { M4UMSG("invalidate range twice, also fail! \n"); m4u_invalid_tlb_all(m4u_index, gM4U_L2_enable); if(0!=m4u_confirm_range_invalidated(m4u_index, start, end)) // invalidate all failed, die { M4UMSG("invalidate all fail! 
\n"); } } } return 0; } static int m4u_dealloc_mva_dynamic(M4U_MODULE_ID_ENUM eModuleID, const unsigned int BufAddr, const unsigned int BufSize, unsigned int mvaRegionAddr, struct sg_table* sg_table) { int ret; unsigned int pteStart, pteNr; unsigned int align_page_num, page_num; unsigned int prefetch_distance = 1; M4ULOG("mva dealloc: ID=%s, VA=0x%x, size=%d, mva=0x%x\n", m4u_get_module_name(eModuleID), BufAddr, BufSize, mvaRegionAddr); page_num = M4U_GET_PAGE_NUM(BufAddr, BufSize); align_page_num = ((4-(page_num&(4-1)))&(4-1)) + 4*prefetch_distance; mutex_lock(&gM4uMutex); MMProfileLogEx(M4U_MMP_Events[PROFILE_RELEASE_PAGES], MMProfileFlagStart, eModuleID, BufAddr); m4u_release_pages(eModuleID,BufAddr,BufSize,mvaRegionAddr, sg_table); //================================== // fill pagetable with 0 { pteStart= (unsigned int)mva_pteAddr_nonsec(mvaRegionAddr); // get offset in the page table pteNr = ((BufSize+(BufAddr&0xfff))/DEFAULT_PAGE_SIZE) + (((BufAddr+BufSize)&0xfff)!=0); pteNr += align_page_num; memset((void*)pteStart, 0, pteNr<<2); spin_lock(&gM4u_reg_lock); m4u_invalidate_and_check(m4u_module_2_m4u_id(eModuleID), mvaRegionAddr, mvaRegionAddr+BufSize+align_page_num*0x1000-1); spin_unlock(&gM4u_reg_lock); } MMProfileLogEx(M4U_MMP_Events[PROFILE_RELEASE_PAGES], MMProfileFlagEnd, eModuleID, BufSize); mutex_unlock(&gM4uMutex); MMProfileLogEx(M4U_MMP_Events[PROFILE_RELEASE_MVA_REGION], MMProfileFlagStart, eModuleID, BufAddr); { ret = m4u_do_mva_free(eModuleID, BufAddr, BufSize+align_page_num*0x1000, mvaRegionAddr); } MMProfileLogEx(M4U_MMP_Events[PROFILE_RELEASE_MVA_REGION], MMProfileFlagEnd, eModuleID, BufSize); M4U_mvaGraph_dump_DBG(); return ret; } static int m4u_fill_pagetable(M4U_MODULE_ID_ENUM eModuleID, unsigned int BufAddr, unsigned int BufSize, unsigned int mvaStart, unsigned int entry_flag, struct sg_table* sg_table) { int i; int page_num; unsigned int *pPagetable_nonsec; unsigned int *pPagetable_sec; unsigned int *pPhys; 
MMProfileLogEx(M4U_MMP_Events[PROFILE_GET_PAGES], MMProfileFlagStart, eModuleID, BufAddr); pPagetable_nonsec = mva_pteAddr_nonsec(mvaStart); pPagetable_sec = mva_pteAddr_sec(mvaStart); page_num = M4U_GET_PAGE_NUM(BufAddr, BufSize); pPhys = (unsigned int*)vmalloc(page_num*sizeof(unsigned int*)); if(pPhys == NULL) { MMProfileLogEx(M4U_MMP_Events[PROFILE_GET_PAGES], MMProfileFlagEnd, eModuleID, BufSize); M4UMSG("m4u_fill_pagetable: error to vmalloc %d*4 size\n", page_num); return 0; } if(sg_table != NULL) page_num = m4u_get_pages_sg(eModuleID, BufAddr, BufSize, sg_table, pPhys); else page_num = m4u_get_pages(eModuleID, BufAddr, BufSize, pPhys); if(page_num<=0) { MMProfileLogEx(M4U_MMP_Events[PROFILE_GET_PAGES], MMProfileFlagEnd, eModuleID, BufSize); M4UDBG("Error: m4u_get_pages failed \n"); return 0; } mutex_lock(&gM4uMutex); //fill page table for(i=0;i<page_num;i++) { unsigned int pa = pPhys[i]; pa |= entry_flag; #ifdef M4U_USE_ONE_PAGETABLE *(pPagetable_nonsec+i) = pa; #else if(!(entry_flag&0x8)) { *(pPagetable_sec+i) = pa; } else { #ifdef M4U_COPY_NONSEC_PT_TO_SEC *(pPagetable_nonsec+i) = pa; *(pPagetable_sec+i) = pa; #else *(pPagetable_nonsec+i) = pa; #endif } #endif } vfree(pPhys); mb(); MMProfileLogEx(M4U_MMP_Events[PROFILE_GET_PAGES], MMProfileFlagEnd, eModuleID, BufSize); ///------------------------------------------------------- ///flush tlb entries in this mva range MMProfileLogEx(M4U_MMP_Events[PROFILE_ALLOC_FLUSH_TLB], MMProfileFlagStart, eModuleID, BufAddr); spin_lock(&gM4u_reg_lock); m4u_invalidate_and_check(m4u_module_2_m4u_id(eModuleID), mvaStart, mvaStart+BufSize-1); spin_unlock(&gM4u_reg_lock); MMProfileLogEx(M4U_MMP_Events[PROFILE_ALLOC_FLUSH_TLB], MMProfileFlagEnd, eModuleID, BufSize); // record memory usage pmodule_current_size[eModuleID] += BufSize; if(pmodule_current_size[eModuleID]>gModuleMaxMVASize[eModuleID]) { m4u_aee_print("overflow: larb=%d, module=%s, mvaSize=%d(max=%d)\n", m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID), 
pmodule_current_size[eModuleID], gModuleMaxMVASize[eModuleID]);
        M4UMSG("hint: alloc mva but forget to free it!!\n");
        m4u_dump_mva_info();
    }
    if(pmodule_current_size[eModuleID]> pmodule_max_size[eModuleID])
    {
        pmodule_max_size[eModuleID] = pmodule_current_size[eModuleID];
    }

    mutex_unlock(&gM4uMutex);

    return page_num;
}

/*
  add for ovl:
  this function will build pagetable for framebuffer, its mva==pa.
  so when switch from LK to kernel, ovl just switch to virtual mode,
  no need to modify address register !!!!

  NOTES: 1. only be used by ovl for frame buffer
         2. currently, total mva is 1G
            frame buffer pa is > 0xf0000000
            so it won't be corrupted by other m4u_alloc_mva()
*/
/* Identity-map [pa, pa+size) in the non-secure pagetable (mva == pa),
 * one valid non-secure descriptor per 4K page. */
int m4u_fill_linear_pagetable(unsigned int pa, unsigned int size)
{
    int page_num, i;
    unsigned int mva = pa&(~M4U_PAGE_MASK);
    unsigned int *pPt = mva_pteAddr_nonsec(mva);

    page_num = M4U_GET_PAGE_NUM(pa, size);
    pa = mva;   //page align

    for(i=0; i<page_num; i++)
    {
        pPt[i] = pa | F_DESC_VALID | F_DESC_NONSEC(1);
        pa += M4U_PAGE_SIZE;
    }
    return 0;
}

/* Clear the non-secure PTEs covering [pa, pa+size) that were written by
 * m4u_fill_linear_pagetable(). */
int m4u_erase_linear_pagetable(unsigned int pa, unsigned int size)
{
    int page_num, i;
    unsigned int *pPt = mva_pteAddr_nonsec(pa);

    page_num = M4U_GET_PAGE_NUM(pa, size);
    for(i=0; i<page_num; i++)
    {
        pPt[i] = 0;
    }
    return 0;
}

/* Allocate an MVA region for pMvaInfo's buffer and build its pagetable;
 * padding/alignment pages are appended for the hardware prefetcher. */
int __m4u_alloc_mva(mva_info_t *pMvaInfo, struct sg_table *sg_table)
{
    M4U_MODULE_ID_ENUM eModuleID = pMvaInfo->eModuleId;
    const unsigned int BufAddr = pMvaInfo->bufAddr;
    const unsigned int BufSize = pMvaInfo->size;
    int security = pMvaInfo->security;
    int cache_coherent = pMvaInfo->cache_coherent;
    unsigned int page_num, align_page_num;
    unsigned int mvaStart;
    unsigned int i;
    /* descriptor bits: valid + (non-)secure + shareable when coherent */
    unsigned int entry_flag = F_DESC_VALID | F_DESC_NONSEC(!security) | F_DESC_SHARE(!!cache_coherent);
    int prefetch_distance = 1;
    unsigned int m4u_index = m4u_module_2_m4u_id(eModuleID);
    unsigned int m4u_base = gM4UBaseAddr[m4u_index];

    MMProfileLogEx(M4U_MMP_Events[PROFILE_ALLOC_MVA], MMProfileFlagStart, eModuleID, BufAddr);
    MMProfileLogEx(M4U_MMP_Events[PROFILE_ALLOC_MVA],
MMProfileFlagPulse, current->tgid, 0); page_num = M4U_GET_PAGE_NUM(BufAddr, BufSize); align_page_num = ((4-(page_num&(4-1)))&(4-1)) + 4*prefetch_distance; mvaStart= m4u_do_mva_alloc(eModuleID, BufAddr, BufSize+align_page_num*0x1000, pMvaInfo); if(mvaStart == 0) { m4u_aee_print("alloc mva fail: larb=%d,module=%s,size=%d\n", m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID), BufSize); M4UMSG("mva_alloc error: no available MVA region for %d bytes!\n", BufSize); m4u_mvaGraph_dump(); m4u_free_garbage_list(pMvaInfo); return -1; } page_num = m4u_fill_pagetable(eModuleID, BufAddr, BufSize, mvaStart, entry_flag, sg_table); if(page_num==0) { M4UMSG("alloc_mva error: id=%s, addr=0x%08x, size=%d, sec=%d\n", m4u_get_module_name(eModuleID), BufAddr, BufSize, security); goto error_alloc_mva; } //for m4u bug in mt6589: we need our entry_start and entry_end align at 128 bits (pfh tlb is 128bits align) //this aims to ensure that no invalid tlb will be fetched into tlb //TODO: now prefetch_distance is 1 for most engine. 
if prefetch is not 1, we should modify this too { unsigned int *pPagetable_nonsec; unsigned int *pPagetable_sec; unsigned int pa = gM4U_align_page_pa|entry_flag; pPagetable_sec = mva_pteAddr_sec(mvaStart)+page_num; pPagetable_nonsec = mva_pteAddr_nonsec(mvaStart)+page_num; for(i=0; i<align_page_num; i++) { #ifdef M4U_USE_ONE_PAGETABLE *(pPagetable_nonsec+i) = pa; #else if(security) { *(pPagetable_sec+i) = pa; } else { #ifdef M4U_COPY_NONSEC_PT_TO_SEC *(pPagetable_nonsec+i) = pa; *(pPagetable_sec+i) = pa; #else *(pPagetable_nonsec+i) = pa; #endif } #endif } } if(g_debug_make_translation_fault == 1) { unsigned int *pPagetable_nonsec; unsigned int *pPagetable_sec; pPagetable_sec = mva_pteAddr_sec(mvaStart); pPagetable_nonsec = mva_pteAddr_nonsec(mvaStart); *pPagetable_sec = 0; *pPagetable_nonsec = 0; } pMvaInfo->mvaStart = mvaStart; MMProfileLogEx(M4U_MMP_Events[PROFILE_ALLOC_MVA], MMProfileFlagEnd, mvaStart, BufSize); M4UINFO("alloc_mva_dynamic: id=%s, addr=0x%08x, size=%d,sec=%d, mva=0x%x, mva_end=0x%x, 0x5c0 = 0x%x, 0x5c4 = 0x%x, 0x5c8 = 0x%x, 0x5cc = 0x%x, 0x5d0 = 0x%x.\n", m4u_get_module_name(eModuleID), BufAddr, BufSize, security, pMvaInfo->mvaStart, pMvaInfo->mvaStart+BufSize-1, M4U_ReadReg32(m4u_base, 0x5C0), M4U_ReadReg32(m4u_base, 0x5C4), M4U_ReadReg32(m4u_base, 0x5C8),M4U_ReadReg32(m4u_base, 0x5CC),M4U_ReadReg32(m4u_base, 0x5D0)); return 0; error_alloc_mva: m4u_do_mva_free(eModuleID, BufAddr, BufSize+align_page_num*0x1000, mvaStart); M4UINFO("alloc_mva error: id=%s, addr=0x%08x, size=%d, sec=%d\n", m4u_get_module_name(eModuleID), BufAddr, BufSize, security); return -1; } #define MVA_PROTECT_BUFFER_SIZE 1024*1024 int __m4u_dealloc_mva(M4U_MODULE_ID_ENUM eModuleID, const unsigned int BufAddr, const unsigned int BufSize, const unsigned int MVA, struct sg_table* sg_table) { int ret; unsigned int m4u_index = m4u_module_2_m4u_id(eModuleID); unsigned int m4u_base = gM4UBaseAddr[m4u_index]; M4UINFO("m4u_dealloc_mva, module=%s, addr=0x%x, size=0x%x, MVA=0x%x, 
mva_end=0x%x, 0x5c0 = 0x%x, 0x5c4 = 0x%x, 0x5c8 = 0x%x, 0x5cc = 0x%x, 0x5d0 = 0x%x.\n", m4u_get_module_name(eModuleID), BufAddr, BufSize, MVA, MVA+BufSize-1, M4U_ReadReg32(m4u_base, 0x5C0), M4U_ReadReg32(m4u_base, 0x5C4), M4U_ReadReg32(m4u_base, 0x5C8),M4U_ReadReg32(m4u_base, 0x5CC),M4U_ReadReg32(m4u_base, 0x5D0)); MMProfileLogEx(M4U_MMP_Events[PROFILE_DEALLOC_MVA], MMProfileFlagStart, eModuleID, BufAddr); MMProfileLogEx(M4U_MMP_Events[PROFILE_DEALLOC_MVA], MMProfileFlagPulse, current->tgid, 0); //if(eModuleID!=M4U_CLNTMOD_RDMA_GENERAL && eModuleID!=M4U_CLNTMOD_ROT_GENERAL) { if(m4u_invalid_seq_range_by_mva(m4u_module_2_m4u_id(eModuleID), MVA, MVA+BufSize-1)==0) { M4UMSG("warning: dealloc mva without invalid tlb range!! id=%s,add=0x%x,size=0x%x,mva=0x%x\n", m4u_get_module_name(eModuleID), BufAddr, BufSize, MVA); } } ret = m4u_dealloc_mva_dynamic(eModuleID, BufAddr, BufSize, MVA, sg_table); MMProfileLogEx(M4U_MMP_Events[PROFILE_DEALLOC_MVA], MMProfileFlagEnd, MVA, BufSize); return ret; } static int m4u_invalid_seq_all(M4U_MODULE_ID_ENUM eModuleID) { unsigned int i; unsigned int m4u_index = m4u_module_2_m4u_id(eModuleID); unsigned int m4u_base = gM4UBaseAddr[m4u_index]; unsigned int m4u_index_offset = (SEQ_RANGE_NUM)*m4u_index; M4ULOG("m4u_invalid_tlb_all, module:%s \n", m4u_get_module_name(eModuleID)); M4U_POW_ON_TRY(eModuleID); spin_lock(&gM4u_reg_lock); if(FreeSEQRegs[m4u_index] < SEQ_RANGE_NUM) { for(i=0;i<SEQ_RANGE_NUM;i++) { if(pRangeDes[i+m4u_index_offset].Enabled == 1) { pRangeDes[i].Enabled = 0; M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(i), 0); M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(i), 0); FreeSEQRegs[m4u_index]++; } } } m4u_invalid_tlb_all(m4u_index, 0); M4U_POW_OFF_TRY(eModuleID); spin_unlock(&gM4u_reg_lock); return 0; } static inline int mva_owner_match(M4U_MODULE_ID_ENUM id, M4U_MODULE_ID_ENUM owner) { if(owner == id) return 1; #if 0 if(owner==M4U_CLNTMOD_RDMA_GENERAL && (id==M4U_CLNTMOD_RDMA0||id==M4U_CLNTMOD_RDMA1) ) { return 1; } 
    if(owner==M4U_CLNTMOD_ROT_GENERAL &&
        (id==M4U_CLNTMOD_VDO_ROT0||
         id==M4U_CLNTMOD_RGB_ROT0||
         id==M4U_CLNTMOD_RGB_ROT1||
         id==M4U_CLNTMOD_VDO_ROT1||
         id==M4U_CLNTMOD_RGB_ROT2) )
    {
        return 1;
    }
#endif
    return 0;
}

/*
 * Manually program one page-table entry into the M4U TLB via the PROG_VA /
 * PROG_DSC registers, optionally locking it so it is never evicted.
 *
 * @eModuleID        port whose M4U instance receives the entry
 * @EntryMVA         MVA whose PTE is pushed into hardware (low 12 bits cleared)
 * @secure_pagetable non-zero: read the PTE from the secure page table
 * @Lock             non-zero: set the lock bit so the entry stays resident
 *
 * NOTE(review): no register lock is taken here — presumably the caller holds
 * gM4u_reg_lock or runs in a single-threaded init path; confirm at call sites.
 */
static int m4u_manual_insert_entry(M4U_PORT_ID_ENUM eModuleID, unsigned int EntryMVA,
                                   int secure_pagetable, int Lock)
{
    unsigned int *pPageAddr = 0;
    unsigned int EntryPA;
    unsigned int m4u_base = gM4UBaseAddr[m4u_port_2_m4u_id(eModuleID)];

    M4UDBG("m4u_manual_insert_entry, module:%s, EntryMVA:0x%x,secure:%d, Lock:%d \r\n",
        m4u_get_port_name(eModuleID), EntryMVA, secure_pagetable, Lock);

    /* Fetch the descriptor from the secure or non-secure page table. */
    if(secure_pagetable)
    {
        pPageAddr = mva_pteAddr_sec(EntryMVA);
    }
    else
    {
        pPageAddr = mva_pteAddr_nonsec(EntryMVA);
    }
    EntryPA = *pPageAddr;
    // EntryPA &= 0xFFFFF000; //clear bit0~11
    EntryMVA &= 0xFFFFF000; //clear bit0~11 (page-align the programmed VA)

    if(Lock)
    {
        EntryMVA |= F_PROG_VA_LOCK_BIT;
    }
    /* Mark the entry secure only when the descriptor itself is not
     * flagged non-secure. */
    if(secure_pagetable && (!(EntryPA&F_DESC_NONSEC(1))))
    {
        EntryMVA |= F_PROG_VA_SECURE_BIT;
    }
    M4U_WriteReg32(m4u_base, REG_MMU_PROG_VA, EntryMVA);
    M4U_WriteReg32(m4u_base, REG_MMU_PROG_DSC, EntryPA);
    M4U_WriteReg32(m4u_base, REG_MMU_PROG_EN, F_MMU_PROG_EN);
    return 0;
}

// #define M4U_PRINT_RANGE_DETAIL // dump range info when no available range can be found
#define M4U_INVALID_ID 0x5555

/*
 * Claim a free sequential-range register for [MVAStart, MVAEnd] so the M4U
 * prefetches that window. entryCount must be a power of two <= 16; anything
 * else is coerced to 1. Takes gM4uMutex internally.
 */
int m4u_do_insert_seq_range(M4U_PORT_ID_ENUM eModuleID, unsigned int MVAStart,
                            unsigned int MVAEnd, unsigned int entryCount)
{
    unsigned int i;
    unsigned int RangeReg_ID = M4U_INVALID_ID;
    unsigned int m4u_index = m4u_module_2_m4u_id(eModuleID);
    unsigned int m4u_base = gM4UBaseAddr[m4u_index];
    unsigned int m4u_index_offset = (SEQ_RANGE_NUM)*m4u_index;

    /* Hardware only supports 1/2/4/8/16 entries per range. */
    if(entryCount!=1 && entryCount!=2 && entryCount!=4 && entryCount!=8 && entryCount!=16)
        entryCount = 1;

    mutex_lock(&gM4uMutex);

    /* Sanity: the start MVA must belong to a known owner module. */
    if(mva2module(MVAStart)>=M4U_PORT_UNKNOWN)
    {
        m4u_aee_print("insert range fail: larb=%d,module=%s\n",
            m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID));
        M4UMSG(" m4u_insert_seq_range module=%s, MVAStart=0x%x is %s,
MVAEnd=0x%x is %s\n", m4u_get_module_name(eModuleID), MVAStart, m4u_get_module_name(mva2module(MVAStart)), MVAEnd, m4u_get_module_name(mva2module(MVAEnd))); m4u_mvaGraph_dump(); } M4ULOG("m4u_insert_seq_range , module:%s, MVAStart:0x%x, MVAEnd:0x%x, entryCount=%d \r\n", m4u_get_module_name(eModuleID), MVAStart, MVAEnd, entryCount); //================================== //no seq range error if(FreeSEQRegs[m4u_index] == 0) { M4ULOG("No seq range found. module=%s \n", m4u_get_module_name(eModuleID)); #ifdef M4U_PRINT_RANGE_DETAIL M4UMSG("m4u_insert_seq_range , module:%s, MVAStart:0x%x, MVAEnd:0x%x, entryCount=%d \r\n", m4u_get_module_name(eModuleID), MVAStart, MVAEnd, entryCount); M4UMSG(" Curent Range Info: \n"); for(i=0;i<TOTAL_RANGE_NUM;i++) { if(1==pRangeDes[i].Enabled) { M4UMSG("pRangeDes[%d]: Enabled=%d, module=%s, MVAStart=0x%x, MVAEnd=0x%x \n", i, pRangeDes[i].Enabled, m4u_get_module_name(pRangeDes[i].eModuleID), pRangeDes[i].MVAStart, pRangeDes[i].MVAEnd); } } #endif mutex_unlock(&gM4uMutex); return 0; } //=============================================== //every seq range has to align to 256K Bytes MVAStart &= ~M4U_SEQ_ALIGN_MSK; MVAEnd |= M4U_SEQ_ALIGN_MSK; //================================================================== // check if the range is overlap with previous ones for(i=m4u_index_offset;i<m4u_index_offset+SEQ_RANGE_NUM;i++) { if(1==pRangeDes[i].Enabled) { if(MVAEnd<pRangeDes[i].MVAStart || MVAStart>pRangeDes[i].MVAEnd) //no overlap { continue; } else { M4UMSG("insert range overlap!: larb=%d,module=%s\n", m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID)); M4UMSG("error: insert tlb range is overlapped with previous ranges, current process=%s,!\n", current->comm); M4UMSG("module=%s, mva_start=0x%x, mva_end=0x%x \n", m4u_get_module_name(eModuleID), MVAStart, MVAEnd); M4UMSG("overlapped range id=%d, module=%s, mva_start=0x%x, mva_end=0x%x \n", i, m4u_get_module_name(pRangeDes[i].eModuleID), pRangeDes[i].MVAStart, pRangeDes[i].MVAEnd); 
mutex_unlock(&gM4uMutex); return 0; } } } //======================================== //find a free seq range if(FreeSEQRegs[m4u_index]>0) ///> first search in low priority { for(i=m4u_index_offset;i<m4u_index_offset+SEQ_RANGE_NUM;i++) { if(pRangeDes[i].Enabled == 0) { RangeReg_ID = i; FreeSEQRegs[m4u_index]--; break; } } } if(RangeReg_ID == M4U_INVALID_ID) { M4ULOG("error: can not find available range \n"); mutex_unlock(&gM4uMutex); return 0; // do not have to return erro to up-layer, nothing will happen even insert tlb range fails } //====================================================== // write register to insert seq range ///> record range information in array pRangeDes[RangeReg_ID].Enabled = 1; pRangeDes[RangeReg_ID].eModuleID = eModuleID; pRangeDes[RangeReg_ID].MVAStart = MVAStart; pRangeDes[RangeReg_ID].MVAEnd = MVAEnd; pRangeDes[RangeReg_ID].entryCount = entryCount; ///> set the range register MVAStart &= F_SQ_VA_MASK; MVAStart |= F_SQ_MULTI_ENTRY_VAL(entryCount-1); MVAStart |= F_SQ_EN_BIT; //align mvaend to 256K MVAEnd |= ~F_SQ_VA_MASK; spin_lock(&gM4u_reg_lock); { M4U_POW_ON_TRY(eModuleID); M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(RangeReg_ID-m4u_index_offset), MVAStart); M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(RangeReg_ID-m4u_index_offset), MVAEnd); M4U_POW_OFF_TRY(eModuleID); } spin_unlock(&gM4u_reg_lock); mutex_unlock(&gM4uMutex); return 0; } //end of vM4USetUniupdateRangeInTLB() int m4u_invalid_seq_range_by_mva(int m4u_index, unsigned int MVAStart, unsigned int MVAEnd) { unsigned int i; unsigned int m4u_base = gM4UBaseAddr[m4u_index]; unsigned int m4u_index_offset = SEQ_RANGE_NUM*m4u_index; int ret=-1; MVAStart &= ~M4U_SEQ_ALIGN_MSK; MVAEnd |= M4U_SEQ_ALIGN_MSK; M4UDBG("m4u_invalid_tlb_range_by_mva, MVAStart:0x%x, MVAEnd:0x%x \r\n", MVAStart, MVAEnd); mutex_lock(&gM4uMutex); spin_lock(&gM4u_reg_lock); M4U_POW_ON_TRY(m4u_index); if(FreeSEQRegs[m4u_index] < SEQ_RANGE_NUM) { for(i=m4u_index_offset;i<m4u_index_offset+SEQ_RANGE_NUM;i++) { 
            if(pRangeDes[i].Enabled == 1
                && pRangeDes[i].MVAStart>=MVAStart
                && pRangeDes[i].MVAEnd<=MVAEnd)
            {
                /* Range fully inside [MVAStart, MVAEnd]: disable it and free
                 * the hardware register pair. */
                pRangeDes[i].Enabled = 0;
                M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(i-m4u_index_offset), 0);
                M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(i-m4u_index_offset), 0);
                mb(); /* ensure the register writes land before bookkeeping */
                FreeSEQRegs[m4u_index]++;
                break;
            }
        }
    }
    //trig invalidate register, 6589 invalid is moved to M4U global register area
    m4u_invalid_tlb_by_range(m4u_index, gM4U_L2_enable, MVAStart, MVAEnd);

    spin_unlock(&gM4u_reg_lock);
    mutex_unlock(&gM4uMutex);
    /* NOTE(review): ret is initialised to -1 and never updated, so this
     * function always returns -1 even when a range was found and cleared;
     * callers that test "== 0" (e.g. __m4u_dealloc_mva's warning) can never
     * fire. Also, M4U_POW_ON_TRY above has no matching M4U_POW_OFF_TRY here —
     * verify power refcounting is intended to stay elevated. */
    return ret;
}

//only check MEP_ROTVO, MDP_ROTCO, MDP_ROTO
//return 0: va, 1:pa
/* Inspect SMI config registers 0x5C0/0x5C4 to decide whether the given ROT
 * port is currently in PA (bypass) mode. Only the three ROT write ports are
 * checked; every other port reports 0 (VA). */
int m4u_do_check_port_va_or_pa(M4U_PORT_STRUCT* pM4uPort)
{
    int ret = 0;
    M4U_PORT_ID_ENUM PortID = (pM4uPort->ePortID);
    unsigned int m4u_base = gM4UBaseAddr[m4u_port_2_m4u_id(PortID)];
    unsigned int regVal;

    if (PortID == MDP_ROTVO)
    {
        regVal = M4U_ReadReg32(m4u_base, 0x5C4);
        if ((regVal & 0x8) == 0x0) /* virtual-enable bit clear => PA mode */
        {
            M4UMSG("m4u_check_port_va_or_pa port = %s, 0x5C4 = 0x%x",
                m4u_get_port_name(PortID), regVal);
            ret = 1;
        }
    }
    if (PortID == MDP_ROTCO)
    {
        regVal = M4U_ReadReg32(m4u_base, 0x5C0);
        if ((regVal & 0x80000000) == 0x0)
        {
            M4UMSG("m4u_check_port_va_or_pa port = %s, 0x5C0 = 0x%x",
                m4u_get_port_name(PortID), regVal);
            ret = 1;
        }
    }
    if (PortID == MDP_ROTO)
    {
        regVal = M4U_ReadReg32(m4u_base, 0x5C0);
        if ((regVal & 0x08000000) == 0x0)
        {
            M4UMSG("m4u_check_port_va_or_pa port = %s, 0x5C0 = 0x%x",
                m4u_get_port_name(PortID), regVal);
            ret = 1;
        }
    }
    return ret;
}

/*
 * Program a port's prefetch direction/distance and its SMI secure-control
 * bits (virtual/secure/domain). Distance and Direction are forced to 1/0
 * below regardless of what the caller passed in.
 */
int m4u_do_config_port(M4U_PORT_STRUCT* pM4uPort) //native
{
    M4U_PORT_ID_ENUM PortID = (pM4uPort->ePortID);
    unsigned int m4u_base = gM4UBaseAddr[m4u_port_2_m4u_id(PortID)];
    M4U_MODULE_ID_ENUM eModuleID = m4u_port_2_module(PortID);
    unsigned int sec_con_val;

    /* Caller-provided prefetch tuning is deliberately overridden. */
    pM4uPort->Distance = 1;
    pM4uPort->Direction = 0;

    M4UINFO("m4u_config_port(), port=%s, Virtuality=%d, Security=%d, 0x5c0 = 0x%x, 0x5c4 = 0x%x, 0x5c8 = 0x%x, 0x5cc = 0x%x, 0x5d0 = 0x%x.\n",
        m4u_get_port_name(pM4uPort->ePortID), pM4uPort->Virtuality, pM4uPort->Security,
        M4U_ReadReg32(m4u_base, 0x5C0),
M4U_ReadReg32(m4u_base, 0x5C4), M4U_ReadReg32(m4u_base, 0x5C8),M4U_ReadReg32(m4u_base, 0x5CC),M4U_ReadReg32(m4u_base, 0x5D0)); if ((PortID == MDP_ROTO || PortID == MDP_ROTCO || PortID == MDP_ROTVO) && pM4uPort->Virtuality == 0) { m4u_aee_print("m4u_config_port(), port=%s, Virtuality=%d, Security=%d, 0x5c0 = 0x%x, 0x5c4 = 0x%x, 0x5c8 = 0x%x, 0x5cc = 0x%x, 0x5d0 = 0x%x \n", m4u_get_port_name(pM4uPort->ePortID), pM4uPort->Virtuality, pM4uPort->Security, M4U_ReadReg32(m4u_base, 0x5C0), M4U_ReadReg32(m4u_base, 0x5C4), M4U_ReadReg32(m4u_base, 0x5C8),M4U_ReadReg32(m4u_base, 0x5CC),M4U_ReadReg32(m4u_base, 0x5D0)); } MMProfileLogEx(M4U_MMP_Events[PROFILE_CONFIG_PORT], MMProfileFlagStart, eModuleID, pM4uPort->ePortID); enable_mux(MT_MUX_MM, "m4u"); spin_lock(&gM4u_reg_lock); // Direction, one bit for each port, 1:-, 0:+ m4uHw_set_field_by_mask(m4u_base, REG_MMU_PFH_DIR(PortID),\ F_MMU_PFH_DIR(PortID, 1), F_MMU_PFH_DIR(PortID, pM4uPort->Direction)); // Distance if(pM4uPort->Distance>16) { M4ULOG("m4u_config_port() error, port=%s, Virtuality=%d, Security=%d, Distance=%d, Direction=%d \n", m4u_get_port_name(pM4uPort->ePortID), pM4uPort->Virtuality, pM4uPort->Security, pM4uPort->Distance, pM4uPort->Direction); } m4uHw_set_field_by_mask(m4u_base, REG_MMU_PFH_DIST(PortID),\ F_MMU_PFH_DIST_MASK(PortID), F_MMU_PFH_DIST_VAL(PortID,pM4uPort->Distance)); // Virtuality, 1:V, 0:P sec_con_val = 0; if(pM4uPort->Virtuality) { sec_con_val |= F_SMI_SECUR_CON_VIRTUAL(PortID); } if(pM4uPort->Security) { sec_con_val |= F_SMI_SECUR_CON_SECURE(PortID); } sec_con_val |= F_SMI_SECUR_CON_DOMAIN(PortID, 3);//pM4uPort->domain); m4uHw_set_field_by_mask(0, REG_SMI_SECUR_CON_OF_PORT(PortID),\ F_SMI_SECUR_CON_MASK(PortID), sec_con_val); spin_unlock(&gM4u_reg_lock); disable_mux(MT_MUX_MM, "m4u"); MMProfileLogEx(M4U_MMP_Events[PROFILE_CONFIG_PORT], MMProfileFlagEnd, pM4uPort->Virtuality, pM4uPort->ePortID); return 0; } static void m4u_get_perf_counter(int m4u_index, M4U_PERF_COUNT *pM4U_perf_count) { 
unsigned int m4u_base = gM4UBaseAddr[m4u_index];

    /* Snapshot the four hardware performance counters into *pM4U_perf_count. */
    pM4U_perf_count->transaction_cnt= M4U_ReadReg32(m4u_base, REG_MMU_ACC_CNT);      ///> Transaction access count
    pM4U_perf_count->main_tlb_miss_cnt= M4U_ReadReg32(m4u_base, REG_MMU_MAIN_MSCNT); ///> Main TLB miss count
    pM4U_perf_count->pfh_tlb_miss_cnt= M4U_ReadReg32(m4u_base, REG_MMU_PF_MSCNT);    ///> Prefetch TLB miss count
    pM4U_perf_count->pfh_cnt = M4U_ReadReg32(m4u_base, REG_MMU_PF_CNT);              ///> Prefetch count
}

/* Clear and arm the M4U performance monitor for instance @m4u_id.
 * Always returns 0. */
int m4u_do_monitor_start(int m4u_id)
{
    unsigned int m4u_base = gM4UBaseAddr[m4u_id];

    //clear GMC performance counter: pulse the CLR bit high then low
    m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
                F_MMU_CTRL_MONITOR_CLR(1), F_MMU_CTRL_MONITOR_CLR(1));
    m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
                F_MMU_CTRL_MONITOR_CLR(1), F_MMU_CTRL_MONITOR_CLR(0));

    //enable GMC performance monitor
    m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
                F_MMU_CTRL_MONITOR_EN(1), F_MMU_CTRL_MONITOR_EN(1));
    return 0;
}

/* Stop the performance monitor for @m4u_id, read back the counters, emit
 * them to MMProfile and the kernel log (including integer miss percentages
 * when any transaction occurred). Always returns 0. */
int m4u_do_monitor_stop(int m4u_id)
{
    M4U_PERF_COUNT cnt;
    unsigned int m4u_base = gM4UBaseAddr[m4u_id];

    //disable GMC performance monitor
    m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG,
                F_MMU_CTRL_MONITOR_EN(1), F_MMU_CTRL_MONITOR_EN(0));

    m4u_get_perf_counter(m4u_id, &cnt); //read register get the count

    MMProfileLogEx(M4U_MMP_Events[PROFILE_MAIN_TLB_MON], MMProfileFlagStart, (unsigned int) m4u_id, cnt.transaction_cnt);
    MMProfileLogEx(M4U_MMP_Events[PROFILE_PREF_TLB_MON], MMProfileFlagStart, (unsigned int) m4u_id, cnt.pfh_cnt);
    MMProfileLogEx(M4U_MMP_Events[PROFILE_MAIN_TLB_MON], MMProfileFlagEnd, (unsigned int) m4u_id, cnt.main_tlb_miss_cnt);
    MMProfileLogEx(M4U_MMP_Events[PROFILE_PREF_TLB_MON], MMProfileFlagEnd, (unsigned int) m4u_id, cnt.pfh_tlb_miss_cnt);

    M4UMSG("[M4U] total:%d, main miss:%d, pfh miss(walk):%d, auto pfh:%d\n",
        cnt.transaction_cnt, cnt.main_tlb_miss_cnt, cnt.pfh_tlb_miss_cnt,cnt.pfh_cnt);

    if(0!=cnt.transaction_cnt)
    {
        /* Integer percentages; transaction_cnt verified non-zero above. */
        M4UMSG("main miss:%d%%, pfh miss:%d%%\n",
            100*cnt.main_tlb_miss_cnt/cnt.transaction_cnt,
100*cnt.pfh_tlb_miss_cnt/cnt.transaction_cnt); } else { M4UMSG("[M4U] no transaction happened! \r\n"); } return 0; } static void m4u_L2_monitor_start(unsigned perf_msk) { unsigned int regval, mask; regval = F_L2_GDC_PERF_MASK(perf_msk) | F_L2_GDC_PERF_EN(1); mask = F_L2_GDC_PERF_MASK(0xffff) | F_L2_GDC_PERF_EN(1); m4uHw_set_field_by_mask(0, REG_L2_GDC_OP, mask, regval); } static void m4u_L2_monitor_stop(unsigned int perf_cnt[2]) { unsigned int perf_msk, regval; unsigned int cnt0, cnt1; regval = COM_ReadReg32(REG_L2_GDC_OP); cnt0 = COM_ReadReg32(REG_L2_GDC_PERF0); cnt1=0; perf_msk = F_GET_L2_GDC_PERF_MASK(regval); M4UMSG("L2 cache monitor stop: \n"); if(perf_msk == GDC_PERF_MASK_HIT_MISS) { M4UMSG("hit/miss counter: \n"); M4UMSG("total:%d, hit:%d, miss:%d, miss_rate:%d%%\n", cnt0+cnt1, cnt0, cnt1, 100*cnt1/(cnt0+cnt1)); } else if(perf_msk == GDC_PERF_MASK_RI_RO) { M4UMSG("ri/ro counter: \n"); M4UMSG("total:%d, ri:%d, ro:%d, ro_rate:%d%%\n", cnt0+cnt1, cnt0, cnt1, 100*cnt1/(cnt0+cnt1)); } /*else if(perf_msk == GDC_PERF_MASK_BUSY_CYCLE) { M4UMSG("busy cycle counter: high32:0x%x, low32:0x%x\n", cnt1, cnt0); }*/ else if(perf_msk == GDC_PERF_MASK_READ_OUTSTAND_FIFO) { M4UMSG("READ_OUTSTAND_FIFO: cnt0:0x%x, cnt1:0x%x\n", cnt0, cnt1); } else { M4UMSG("wrong performance mask of L2 cache %d\n", perf_msk); M4U_ASSERT(0); } if(perf_cnt) { perf_cnt[0] = cnt0; perf_cnt[1] = cnt1; } m4uHw_set_field_by_mask(0, REG_L2_GDC_OP, F_L2_GDC_PERF_EN(1), F_L2_GDC_PERF_EN(0)); } static void m4u_print_perf_counter(int m4u_index, const char *msg) { M4U_PERF_COUNT cnt; m4u_get_perf_counter(m4u_index, &cnt); M4UMSG("====m4u performance count for %s======\n", msg); M4UMSG("total trans=%d, main_miss=%d, pfh_miss=%d, pfh_cnt=%d\n", cnt.transaction_cnt, cnt.main_tlb_miss_cnt, cnt.pfh_tlb_miss_cnt, cnt.pfh_cnt); } #define __M4U_MLOCK_IN_USER_SPACE__ #ifdef __M4U_MLOCK_IN_USER_SPACE__ static int m4u_put_unlock_page(struct page* page) { put_page(page); return 0; } static int 
_m4u_get_pages_user(M4U_MODULE_ID_ENUM eModuleID, unsigned int va, unsigned int size, int page_num, struct vm_area_struct *vma, unsigned int* pPhys) { unsigned int pa; unsigned int va_align = round_down(va, PAGE_SIZE); struct page *page; int i; for(i=0; i<page_num; i++) { unsigned int va_tmp = va_align+i*PAGE_SIZE; if(va_tmp >= vma->vm_end || va_tmp < vma->vm_start) { vma = find_vma(current->mm, va_tmp); if(!vma || vma->vm_start > va_tmp) { m4u_aee_print("cannot find vma, va=0x%x, size=0x%x, fail_va=0x%x, pa=0x%x\n", va, size, va_tmp, pa); m4u_dump_maps(va); goto err; } if(!((vma->vm_flags) & VM_LOCKED)) { m4u_aee_print("no mlock on vma\n"); m4u_dump_maps(va); m4u_dump_maps(va_tmp); dump_stack(); return -1; } } pa = m4u_user_v2p(va_tmp); page = phys_to_page(pa); //all page should have mlock on it! (we uses mlock() in m4u lib) if(!pa)//|| !PageMlocked(page)) { m4u_aee_print("get pa fail, va=0x%x, size=0x%x, fail_va=0x%x, pa=0x%x\n", va, size, va_tmp, pa); if(pa) dump_page(page); m4u_dump_maps(va); m4u_dump_maps(va_tmp); dump_stack(); goto err; } get_page(page); *(pPhys+i) = pa | 0x20; } return 0; err: for(i--; i>=0; i--) { pa = round_down(*(pPhys+i) , PAGE_SIZE); m4u_put_unlock_page(phys_to_page(pa)); *(pPhys+i) = 0; } return -1; } #else static int m4u_put_unlock_page(struct page* page) { unsigned int pfn; int ret = 0; int trycnt; pfn = page_to_pfn(page); MMProfileLogEx(M4U_MMP_Events[PROFILE_MUNLOCK], MMProfileFlagStart, 0, (unsigned int)(pfn<<12)); if(pMlock_cnt[pfn]) { if(!PageMlocked(page)) { ret = -1; } pMlock_cnt[pfn]--; if(pMlock_cnt[pfn] == 0) { /* Use retry version to guarantee no leakage */ trycnt = 3000; do { if (trylock_page(page)) { munlock_vma_page(page); unlock_page(page); break; } mdelay(5); } while (trycnt-- > 0); if(PageMlocked(page)==1) { M4UMSG(" Can't munlock page: \n"); dump_page(page); } } put_page(page); } else { M4UMSG("warning pMlock_cnt[%d]==0 !! 
\n", pfn); ret = -1; } MMProfileLogEx(M4U_MMP_Events[PROFILE_MUNLOCK], MMProfileFlagEnd, 0, 0x1000); return ret; } static int _m4u_get_pages_user(M4U_MODULE_ID_ENUM eModuleID, unsigned int BufAddr, unsigned int size, int page_num, struct vm_area_struct *vma, unsigned int* pPhys) { int write_mode, ret, i; write_mode = (vma->vm_flags&VM_WRITE)?1:0; ret = m4u_get_user_pages( eModuleID, current, current->mm, BufAddr, page_num, write_mode, //m4u_get_write_mode_by_module(eModuleID), // 1 /* write */ 0, /* force */ (struct page**)pPhys, NULL); if(ret<page_num) { // release pages first for(i=0;i<ret;i++) { m4u_put_unlock_page((struct page*)(*(pPhys+i))); } if(unlikely(fatal_signal_pending(current))) { M4UMSG("error: receive sigkill during get_user_pages(), page_num=%d, return=%d, module=%s, current_process:%s \n", page_num, ret, m4u_get_module_name(eModuleID), current->comm); } else { if(ret>0) //return value bigger than 0 but smaller than expected, trigger red screen { M4UMSG("error: page_num=%d, get_user_pages return=%d, module=%s, current_process:%s \n", page_num, ret, m4u_get_module_name(eModuleID), current->comm); M4UMSG("error hint: maybe the allocated VA size is smaller than the size configured to m4u_alloc_mva()!"); } else // return vaule is smaller than 0, maybe the buffer is not exist, just return error to up-layer { M4UMSG("error: page_num=%d, get_user_pages return=%d, module=%s, current_process:%s \n", page_num, ret, m4u_get_module_name(eModuleID), current->comm); M4UMSG("error hint: maybe the VA is deallocated before call m4u_alloc_mva(), or no VA has be ever allocated!"); } m4u_dump_maps(BufAddr); } return -EFAULT; } // add locked pages count, used for debug whether there is memory leakage pmodule_locked_pages[eModuleID] += page_num; for(i=0;i<page_num;i++) { *(pPhys+i) = page_to_phys((struct page*)(*(pPhys+i)))|0x20; } return 0; } #endif static int m4u_get_pages_user(M4U_MODULE_ID_ENUM eModuleID, unsigned int BufAddr, unsigned int BufSize, int page_num, 
unsigned int* pPhys)
{
    unsigned int start_pa, write_mode;
    struct vm_area_struct *vma;
    int i, ret;

    /* Reject oversized single allocations outright. */
    if(BufSize>MAX_BUF_SIZE_TO_GET_USER_PAGE)
    {
        m4u_aee_print("alloc mva fail: larb=%d,module=%s,size=%d\n",
            m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID), BufSize);
        M4UMSG("m4u_get_pages(), single time alloc size=0x%x, bigger than limit=0x%x \n",
            BufSize, MAX_BUF_SIZE_TO_GET_USER_PAGE);
        return -EFAULT;
    }

    down_read(&current->mm->mmap_sem);
    vma = find_vma(current->mm, BufAddr);
    if(vma == NULL)
    {
        M4UMSG("cannot find vma: module=%s, va=0x%x, size=0x%x\n",
            m4u_get_module_name(eModuleID), BufAddr, BufSize);
        m4u_dump_maps(BufAddr);
        /* BUGFIX: this error path returned while still holding mmap_sem,
         * deadlocking every later mmap/munmap of this process. */
        up_read(&current->mm->mmap_sem);
        return -1;
    }

    if((vma->vm_flags) & VM_PFNMAP)
    {
        /* PFN-mapped region (e.g. reserved memory): translate each page
         * directly instead of pinning with get_user_pages(). */
        unsigned int bufEnd = BufAddr + BufSize -1;
        if(bufEnd > vma->vm_end)
        {
            M4UMSG("error: page_num=%d,module=%s, va=0x%x, size=0x%x, vm_flag=0x%x\n",
                page_num, m4u_get_module_name(eModuleID), BufAddr, BufSize, vma->vm_flags);
            M4UMSG("but vma is: vm_start=0x%x, vm_end=0x%x\n", vma->vm_start, vma->vm_end);
            up_read(&current->mm->mmap_sem);
            return -1;
        }
        for(i=0; i<page_num; i++)
        {
            unsigned int va_align = BufAddr&(~M4U_PAGE_MASK);
            *(pPhys+i) = m4u_user_v2p(va_align + 0x1000*i);
        }
        M4UINFO("alloc_mva VM_PFNMAP module=%s, va=0x%x, size=0x%x, vm_flag=0x%x\n",
            m4u_get_module_name(eModuleID), BufAddr, BufSize, vma->vm_flags);
        up_read(&current->mm->mmap_sem);
        return 0;
    }
    else
    {
        /* Normal anonymous/file-backed memory: pin the pages. */
        ret = _m4u_get_pages_user(eModuleID, BufAddr, BufSize, page_num, vma, pPhys);
        up_read(&current->mm->mmap_sem);
        return ret;
    }
}

/*
 * Translate BufAddr/BufSize into an array of per-page physical addresses.
 * Dispatches on the address origin: LCDC framebuffer (contiguous PA),
 * user space (pinned via m4u_get_pages_user), vmalloc, or kmalloc.
 * Returns the page count on success, negative on failure.
 */
static int m4u_get_pages(M4U_MODULE_ID_ENUM eModuleID, unsigned int BufAddr,
                         unsigned int BufSize, unsigned int* pPhys)
{
    int ret,i;
    int page_num;
    unsigned int start_pa;
    unsigned int write_mode = 0;
    struct vm_area_struct *vma = NULL;

    M4ULOG("^ m4u_get_pages: module=%s, BufAddr=0x%x, BufSize=%d \n",
        m4u_get_module_name(eModuleID), BufAddr, BufSize);

    // calculate the number of pages spanned by [BufAddr, BufAddr+BufSize)
    page_num = (BufSize + (BufAddr&0xfff))/DEFAULT_PAGE_SIZE;
    if((BufAddr+BufSize)&0xfff)
    {
        page_num++;
    }

    if(M4U_CLNTMOD_LCDC_UI==eModuleID)
    {
for(i=0;i<page_num;i++) { pPhys[i] = (BufAddr&0xfffff000) + i*DEFAULT_PAGE_SIZE; } } else if(BufAddr<PAGE_OFFSET) // from user space { ret = m4u_get_pages_user(eModuleID, BufAddr, BufSize, page_num, pPhys); if(ret) return ret; } else // from kernel space { if(BufAddr>=VMALLOC_START && BufAddr<=VMALLOC_END) // vmalloc { struct page * ppage; for(i=0;i<page_num;i++) { ppage=vmalloc_to_page((unsigned int *)(BufAddr + i*DEFAULT_PAGE_SIZE)); *(pPhys+i) = page_to_phys(ppage) & 0xfffff000 ; } } else // kmalloc { for(i=0;i<page_num;i++) { *(pPhys+i) = virt_to_phys((void*)((BufAddr&0xfffff000) + i*DEFAULT_PAGE_SIZE)); } } M4UDBG("\n [kernel verify] BufAddr_sv=0x%x, BufAddr_sp=0x%x, BufAddr_ev=0x%x, BufAddr_ep=0x%x \n", BufAddr, virt_to_phys((void*)BufAddr), BufAddr+BufSize-1, virt_to_phys(BufAddr+BufSize-4)); } return page_num; } static int m4u_get_pages_sg(M4U_MODULE_ID_ENUM eModuleID, unsigned int BufAddr, unsigned int BufSize, struct sg_table* sg_table, unsigned int* pPhys) { int i,j; int page_num, map_page_num; struct scatterlist *sg; M4ULOG("^ m4u_get_pages_sg: module=%s, BufAddr=0x%x, BufSize=%d \n", m4u_get_module_name(eModuleID), BufAddr, BufSize); // caculate page number page_num = (BufSize + (BufAddr&0xfff))/DEFAULT_PAGE_SIZE; if((BufAddr+BufSize)&0xfff) { page_num++; } map_page_num = 0; for_each_sg(sg_table->sgl, sg, sg_table->nents, i) { int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE; struct page *page = sg_page(sg); for (j = 0; j < npages_this_entry; j++) { *(pPhys+map_page_num) = page_to_phys(page++) & 0xfffff000; map_page_num++; BUG_ON(map_page_num > page_num); } } return map_page_num; } static int m4u_release_pages(M4U_MODULE_ID_ENUM eModuleID, unsigned int BufAddr, unsigned int BufSize, unsigned int MVA, struct sg_table* sg_table) { unsigned int page_num=0, i=0; unsigned int start_pa; struct page *page; M4ULOG("m4u_release_pages(), module=%s, BufAddr=0x%x, BufSize=0x%x\n", m4u_get_module_name(eModuleID), BufAddr, BufSize); 
if(!mva_owner_match(eModuleID, mva2module(MVA))) { m4u_aee_print("release page fail!: larb=%d,module=%s\n", m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID)); M4UMSG("m4u_release_pages module=%s, MVA=0x%x, expect module is %s \n", m4u_get_module_name(eModuleID), MVA, m4u_get_module_name(mva2module(MVA))); m4u_mvaGraph_dump(); } if(M4U_CLNTMOD_LCDC_UI==eModuleID) { goto RELEASE_FINISH; } if(BufAddr<PAGE_OFFSET && sg_table==NULL) // from user space { // put page by finding PA in pagetable unsigned int* pPageTableAddr = mva_pteAddr(MVA); page_num = (BufSize + (BufAddr&0xfff))/DEFAULT_PAGE_SIZE; if((BufAddr+BufSize)&0xfff) { page_num++; } for(i=0;i<page_num;i++) { start_pa = *(pPageTableAddr+i); if((start_pa&0x02)==0) { continue; } else if(!(start_pa & 0x20)) { continue; } else { page = pfn_to_page(__phys_to_pfn(start_pa)); if(page_count(page)>0) { if((m4u_put_unlock_page(page))) { M4UMSG("warning: put_unlock_page fail module=%s, va=0x%x, size=0x%x,mva=0x%x (page is unlocked before put page)\n", m4u_get_module_name(eModuleID), BufAddr, BufSize, MVA); M4UMSG("i=%d (%d)\n", i, page_num); } } else { M4UMSG("page_count is 0: pfn=%d\n", page_to_pfn(page)); dump_page(page); } pmodule_locked_pages[eModuleID]--; } *(pPageTableAddr+i) = 0; } } RELEASE_FINISH: // record memory usage if(pmodule_current_size[eModuleID]<BufSize) { pmodule_current_size[eModuleID] = 0; M4UMSG("error pmodule_current_size is less than BufSize, module=%s, current_size=%d, BufSize=%d \n", m4u_get_module_name(eModuleID), pmodule_current_size[eModuleID], BufSize); } else { pmodule_current_size[eModuleID] -= BufSize; } return 0; } // Refer to dma_cache_maint(). 
// The function works for user virtual addr #define BUFFER_SIZE_FOR_FLUSH_ALL (864*480*2) int L1_CACHE_SYNC_BY_RANGE_ONLY = 1; #define __M4U_CACHE_SYCN_USING_KERNEL_MAP__ #ifndef __M4U_CACHE_SYCN_USING_KERNEL_MAP__ static M4U_DMA_DIR_ENUM m4u_get_dir_by_module(M4U_MODULE_ID_ENUM eModuleID) { return M4U_DMA_READ_WRITE; } int m4u_do_dma_cache_maint(M4U_MODULE_ID_ENUM eModuleID, const void *start, size_t size, int direction) { void (*outer_op)(phys_addr_t start, phys_addr_t end); // void (*outer_op)(unsigned long, unsigned long); void (*outer_op_all)(void); unsigned int page_start, page_num; unsigned int *pPhy = NULL; int i, ret=0; PROFILE_TYPE ptype=PROFILE_DMA_MAINT_ALL; switch (direction) { case DMA_FROM_DEVICE: if(size < BUFFER_SIZE_FOR_FLUSH_ALL) ptype = PROFILE_DMA_INVALID_RANGE; else ptype = PROFILE_DMA_INVALID_ALL; break; case DMA_TO_DEVICE: if(size < BUFFER_SIZE_FOR_FLUSH_ALL) ptype = PROFILE_DMA_CLEAN_RANGE; else ptype = PROFILE_DMA_CLEAN_ALL; break; case DMA_BIDIRECTIONAL: if(size < BUFFER_SIZE_FOR_FLUSH_ALL) ptype = PROFILE_DMA_FLUSH_RANGE; else ptype = PROFILE_DMA_FLUSH_ALL; break; default: break; } MMProfileLogEx(M4U_MMP_Events[ptype], MMProfileFlagStart, eModuleID, (unsigned int)start); M4ULOG(" m4u_dma_cache_maint(): module=%s, start=0x%x, size=%d, direction=%d \n", m4u_get_module_name(eModuleID), (unsigned int)start, size, direction); if(0==start) { m4u_aee_print("cache sync fail!: larb=%d,module=%s\n", m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID)); M4UMSG(" m4u_dma_cache_maint(): module=%s, start=0x%x, size=%d, direction=%d \n", m4u_get_module_name(eModuleID), (unsigned int)start, size, direction); return -1; } mutex_lock(&gM4uMutex); //To avoid non-cache line align cache corruption, user should make sure //cache start addr and size both cache-line-bytes align //we check start addr here but size should be checked in memory allocator //Rotdma memory is allocated by surfacefligner, address is not easy to modify //so do not check them 
now, should followup after MP if( m4u_get_dir_by_module(eModuleID)== M4U_DMA_WRITE && (((unsigned int)start%L1_CACHE_BYTES!=0) || (size%L1_CACHE_BYTES)!=0) ) { if(1) //screen red in debug mode { m4u_aee_print("Buffer align error: larb=%d,module=%s,addr=0x%x,size=%d,align=%d\n", m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID), (unsigned int)start, size, L1_CACHE_BYTES); M4UMSG("error: addr un-align, module=%s, addr=0x%x, size=0x%x, process=%s, align=0x%x\n", m4u_get_module_name(eModuleID), m4u_get_module_name(eModuleID), (unsigned int)start, size, current->comm, L1_CACHE_BYTES); } else { M4UMSG("error: addr un-align, module=%s, addr=0x%x, size=0x%x, process=%s, align=0x%x\n", m4u_get_module_name(eModuleID), (unsigned int)start, size, current->comm, L1_CACHE_BYTES); } } switch (direction) { case DMA_FROM_DEVICE: /* invalidate only, HW write to memory */ //M4UMSG("error: someone call cache maint with DMA_FROM_DEVICE, module=%s\n",m4u_get_module_name(eModuleID)); outer_op = outer_inv_range; outer_op_all = outer_inv_all; break; case DMA_TO_DEVICE: /* writeback only, HW read from memory */ outer_op = outer_clean_range; outer_op_all = outer_flush_all; break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ outer_op = outer_flush_range; outer_op_all = outer_flush_all; break; default: M4UERR("m4u_dma_cache_maint, direction=%d is invalid \n", direction); return -1; } //<=========================================================================== //< check wether input buffer is valid (has physical pages allocated) page_start = (unsigned int)start & 0xfffff000; page_num = (size + ((unsigned int)start & 0xfff)) / DEFAULT_PAGE_SIZE; if(((unsigned int)start + size) & 0xfff) page_num++; if(size < BUFFER_SIZE_FOR_FLUSH_ALL) { pPhy = kmalloc(sizeof(int)*page_num, GFP_KERNEL); if(pPhy == NULL) { M4UMSG("error to kmalloc in m4u_cache_maint: module=%s, start=0x%x, size=%d, direction=%d \n", m4u_get_module_name(eModuleID), (unsigned int)start, size, direction); goto 
out; } if((unsigned int)start<PAGE_OFFSET) // from user space { for(i=0; i<page_num; i++,page_start+=DEFAULT_PAGE_SIZE) { //struct page* page; pPhy[i] = m4u_user_v2p(page_start); //page = phys_to_page(pPhy[i]); if((pPhy[i]==0))// || (!PageMlocked(page))) { ret=-1; M4UMSG("error: cache_maint() fail, module=%s, start=0x%x, page_start=0x%x, size=%d, pPhy[i]=0x%x\n", m4u_get_module_name(eModuleID), (unsigned int)start, (unsigned int)page_start, size, pPhy[i]); //dump_page(page); m4u_dump_maps((unsigned int)start); goto out; } } } else if((unsigned int)start>=VMALLOC_START && (unsigned int)start<=VMALLOC_END) // vmalloc { struct page * ppage; for(i=0; i<page_num; i++,page_start+=DEFAULT_PAGE_SIZE) { ppage=vmalloc_to_page((void *)page_start); if(ppage == NULL) { ret=-1; M4UMSG("error: ppage is 0 in cache_maint of vmalloc!, module=%s, start=0x%x, pagestart=0x%x\n", m4u_get_module_name(eModuleID), (unsigned int)start,page_start); goto out; } pPhy[i] = page_to_phys(ppage); } } else // kmalloc { for(i=0; i<page_num; i++,page_start+=DEFAULT_PAGE_SIZE) { pPhy[i] = virt_to_phys((void*)page_start); } } } //===================================================================================== // L1 cache clean before hw read if(L1_CACHE_SYNC_BY_RANGE_ONLY) { if (direction == DMA_TO_DEVICE) { dmac_map_area(start, size, direction); } if (direction == DMA_BIDIRECTIONAL) { dmac_flush_range(start, start+size-1); } } else { smp_inner_dcache_flush_all(); } //============================================================================================= // L2 cache maintenance by physical pages if(size<BUFFER_SIZE_FOR_FLUSH_ALL) { for (i=0; i<page_num; i++) { outer_op(pPhy[i], pPhy[i]+ DEFAULT_PAGE_SIZE); } } else { outer_op_all(); } //========================================================================================= // L1 cache invalidate after hw write to memory if(L1_CACHE_SYNC_BY_RANGE_ONLY) { if (direction == DMA_FROM_DEVICE) { dmac_unmap_area(start, size, direction); } } out: 
if(pPhy != NULL) kfree(pPhy); MMProfileLogEx(M4U_MMP_Events[ptype], MMProfileFlagEnd, eModuleID, size); mutex_unlock(&gM4uMutex); return ret; } static int m4u_cache_sync_init(void) { return 0; } #else static unsigned int m4u_cache_v2p(unsigned int va) { unsigned int pageOffset = (va & (PAGE_SIZE - 1)); pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned int pa; if(NULL==current) { M4UMSG("warning: m4u_user_v2p, current is NULL! \n"); return 0; } if(NULL==current->mm) { M4UMSG("warning: m4u_user_v2p, current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm); return 0; } pgd = pgd_offset(current->mm, va); /* what is tsk->mm */ if(pgd_none(*pgd)||pgd_bad(*pgd)) { M4UMSG("m4u_user_v2p(), va=0x%x, pgd invalid! \n", va); return 0; } pud = pud_offset(pgd, va); if(pud_none(*pud)||pud_bad(*pud)) { M4UMSG("m4u_user_v2p(), va=0x%x, pud invalid! \n", va); return 0; } pmd = pmd_offset(pud, va); if(pmd_none(*pmd)||pmd_bad(*pmd)) { M4UMSG("m4u_user_v2p(), va=0x%x, pmd invalid! \n", va); return 0; } pte = pte_offset_map(pmd, va); if(pte_present(*pte)) { pa=(pte_val(*pte) & (PAGE_MASK)) | pageOffset; pte_unmap(pte); return pa; } pte_unmap(pte); M4UMSG("m4u_user_v2p(), va=0x%x, pte invalid! 
\n", va); // m4u_dump_maps(va); return 0; } static struct page* m4u_cache_get_page(unsigned int va) { unsigned int pa, start; struct page *page; start = va & (~M4U_PAGE_MASK); pa = m4u_cache_v2p(start); if((pa==0)) { M4UMSG("error m4u_get_phys user_v2p return 0 on va=0x%x\n", start); //dump_page(page); m4u_dump_maps((unsigned int)start); return NULL; } page = phys_to_page(pa); return page; } static int __m4u_cache_sync_kernel(const void *start, size_t size, int direction) { if (direction == DMA_TO_DEVICE) //clean { dmac_map_area((void*)start, size, DMA_TO_DEVICE); } else if (direction == DMA_FROM_DEVICE) // invalid { dmac_unmap_area((void*)start, size, DMA_FROM_DEVICE); } else if (direction == DMA_BIDIRECTIONAL) //flush { dmac_flush_range((void*)start, (void*)(start+size-1)); } return 0; } static struct vm_struct *cache_map_vm_struct = NULL; static int m4u_cache_sync_init(void) { cache_map_vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC); if (!cache_map_vm_struct) return -ENOMEM; return 0; } static void* m4u_cache_map_page_va(struct page* page) { int ret; struct page** ppPage = &page; ret = map_vm_area(cache_map_vm_struct, PAGE_KERNEL, &ppPage); if(ret) { M4UMSG("error to map page\n"); return NULL; } return cache_map_vm_struct->addr; } static void m4u_cache_unmap_page_va(unsigned int va) { unmap_kernel_range((unsigned long)cache_map_vm_struct->addr, PAGE_SIZE); } //lock to protect cache_map_vm_struct static DEFINE_MUTEX(gM4u_cache_sync_user_lock); static int __m4u_cache_sync_user(unsigned int start, size_t size, int direction) { unsigned int map_size, map_start, map_end; unsigned int end = start+size; struct page* page; unsigned int map_va, map_va_align; int ret = 0; mutex_lock(&gM4u_cache_sync_user_lock); if(!cache_map_vm_struct) { M4UMSG(" error: cache_map_vm_struct is NULL, retry\n"); m4u_cache_sync_init(); } if(!cache_map_vm_struct) { M4UMSG("error: cache_map_vm_struct is NULL, no vmalloc area\n"); ret = -1; goto out; } M4ULOG("__m4u_sync_user: start=0x%x, 
size=0x%x\n", start, size); map_start = start; while(map_start < end) { map_end = min( (map_start&(~M4U_PAGE_MASK))+M4U_PAGE_SIZE, end); map_size = map_end - map_start; page = m4u_cache_get_page(map_start); if(!page) { ret = -1; goto out; } map_va = (unsigned int)m4u_cache_map_page_va(page); if(!map_va) { ret = -1; goto out; } map_va_align = map_va | (map_start&(M4U_PAGE_SIZE-1)); M4ULOG("__m4u_sync_user: map_start=0x%x, map_size=0x%x, map_va=0x%x\n", map_start, map_size, map_va_align); __m4u_cache_sync_kernel((void*)map_va_align, map_size, direction); m4u_cache_unmap_page_va(map_va); map_start = map_end; } out: mutex_unlock(&gM4u_cache_sync_user_lock); return ret; } int m4u_do_dma_cache_maint(M4U_MODULE_ID_ENUM eModuleID, const void *va, size_t size, int direction) { // By range operation unsigned int page_num; int ret = 0; MMProfileLogEx(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], MMProfileFlagStart, (unsigned int)va, size); //MMProfileLogEx(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], MMProfileFlagPulse, eModuleID, direction); if( (((unsigned int)va%L1_CACHE_BYTES!=0) || (size%L1_CACHE_BYTES)!=0)) { M4UMSG("Buffer align error: larb=%d,module=%s,addr=0x%x,size=%d,align=%d\n", m4u_module_2_larb(eModuleID), m4u_get_module_name(eModuleID), (unsigned int)va, size, L1_CACHE_BYTES); M4UMSG("error: addr un-align, module=%s, addr=0x%x, size=0x%x, process=%s, align=0x%x\n", m4u_get_module_name(eModuleID), (unsigned int)va, size, current->comm, L1_CACHE_BYTES); } page_num = M4U_GET_PAGE_NUM(va, size); if((unsigned int)va<PAGE_OFFSET) // from user space { ret = __m4u_cache_sync_user((unsigned int)va, size, direction); } else { ret = __m4u_cache_sync_kernel(va, size, direction); } M4ULOG("cache_sync: module=%s, addr=0x%x, size=0x%x\n", m4u_get_module_name(eModuleID), m4u_get_module_name(eModuleID), (unsigned int)va, size); MMProfileLogEx(M4U_MMP_Events[PROFILE_DMA_MAINT_ALL], MMProfileFlagEnd, ((unsigned int)eModuleID<<16)|direction, ret); return ret; } #endif #define 
M4U_PAGE_TABLE_ALIGN (PT_TOTAL_ENTRY_NUM*sizeof(unsigned int) - 1) // page table addr should (2^16)x align #define M4U_PROTECT_BUF_OFFSET (128-1) // protect buffer should be 128x align static bool m4u_struct_init(void) { unsigned int *pProtectVA=NULL; //Page Table virtual Address struct page* tmp_page = NULL; int i; for(i=0; i<M4U_CLIENT_MODULE_NUM; i++) gModuleMaxMVASize[i] = (400*0x00100000); //======= alloc pagetable======================= pPT_nonsec= dma_alloc_coherent(NULL, PT_TOTAL_ENTRY_NUM * sizeof(unsigned int), &pt_pa_nonsec, GFP_KERNEL); if(!pPT_nonsec) { M4UMSG("dma_alloc_coherent error! dma memory not available.\n"); return false; } if((pt_pa_nonsec&M4U_PAGE_TABLE_ALIGN)!=0) { unsigned int tmp; M4UMSG("dma_alloc_coherent memory not align. PageTablePA=0x%x we will try again \n", pt_pa_nonsec); dma_free_coherent(NULL, PT_TOTAL_ENTRY_NUM * sizeof(unsigned int), pPT_nonsec, pt_pa_nonsec); tmp = (unsigned int)dma_alloc_coherent(NULL, PT_TOTAL_ENTRY_NUM * sizeof(unsigned int)+M4U_PAGE_TABLE_ALIGN, &pt_pa_nonsec, GFP_KERNEL); if(!tmp) { M4UMSG("dma_alloc_coherent error! dma memory not available.\n"); return false; } pPT_nonsec = (unsigned int*)((tmp+M4U_PAGE_TABLE_ALIGN)&(~M4U_PAGE_TABLE_ALIGN)); pt_pa_nonsec += (unsigned int)pPT_nonsec - tmp; } M4UMSG("dma_alloc_coherent success! pagetable_va=0x%x, pagetable_pa=0x%x.\n", (unsigned int)pPT_nonsec, (unsigned int)pt_pa_nonsec); memset((void*)pPT_nonsec, 0, PT_TOTAL_ENTRY_NUM * sizeof(unsigned int)); //======= alloc pagetable done======================= #ifdef M4U_USE_ONE_PAGETABLE pPT_sec = pPT_nonsec; pt_pa_sec = pt_pa_nonsec; #else //======= alloc pagetable for security pt======================= pPT_sec= dma_alloc_coherent(NULL, PT_TOTAL_ENTRY_NUM * sizeof(unsigned int), &pt_pa_sec, GFP_KERNEL); if(!pPT_sec) { M4UMSG("dma_alloc_coherent error for sec pt! dma memory not available.\n"); return false; } if((pt_pa_sec&M4U_PAGE_TABLE_ALIGN)!=0) { unsigned int tmp; M4UMSG("dma_alloc_coherent memory not align. 
PageTablePA=0x%x we will try again \n", pt_pa_sec); dma_free_coherent(NULL, PT_TOTAL_ENTRY_NUM * sizeof(unsigned int), pPT_sec, pt_pa_sec); tmp = (unsigned int)dma_alloc_coherent(NULL, PT_TOTAL_ENTRY_NUM * sizeof(unsigned int)+M4U_PAGE_TABLE_ALIGN, &pt_pa_sec, GFP_KERNEL); if(!tmp) { M4UMSG("dma_alloc_coherent error! dma memory not available.\n"); return false; } pPT_sec = (unsigned int*)((tmp+M4U_PAGE_TABLE_ALIGN)&(~M4U_PAGE_TABLE_ALIGN)); pt_pa_sec += (unsigned int)pPT_sec - tmp; } M4UMSG("dma_alloc_coherent success! pagetable_va=0x%x, pagetable_pa=0x%x.\n", (unsigned int)pPT_sec, (unsigned int)pt_pa_sec); memset((void*)pPT_sec, 0, PT_TOTAL_ENTRY_NUM * sizeof(unsigned int)); //======= alloc pagetable done======================= #endif init_mlock_cnt(); if(NULL==pMlock_cnt) return false; // allocate 128 byte for translation fault protection // when TF occurs, M4U will translate the physical address to ProtectPA pProtectVA = (unsigned int*) kmalloc(TF_PROTECT_BUFFER_SIZE*TOTAL_M4U_NUM+M4U_PROTECT_BUF_OFFSET, GFP_KERNEL|__GFP_ZERO); if(NULL==pProtectVA) { M4UMSG("Physical memory not available.\n"); return false; } pProtectVA = (unsigned int*)(((unsigned int)pProtectVA+M4U_PROTECT_BUF_OFFSET)&(~M4U_PROTECT_BUF_OFFSET)); ProtectPA = virt_to_phys(pProtectVA); if((ProtectPA&0x7f)!=0) { M4UERR("Physical memory not align. 
ProtectPA=0x%x \n", ProtectPA); } pProtectVA_nonCache = pProtectVA; memset((unsigned char*)pProtectVA_nonCache, 0x55, TF_PROTECT_BUFFER_SIZE*TOTAL_M4U_NUM); tmp_page = alloc_page(GFP_KERNEL|__GFP_ZERO); // gM4U_align_page_va = (unsigned int)page_address(tmp_page); gM4U_align_page_pa = (unsigned int)page_to_phys(tmp_page); M4UMSG("gM4U_align_page_pa is 0x%x\n", gM4U_align_page_pa); M4UDBG("ProtectTablePA:0x%x, ProtectTableVA:0x%x, pProtectVA_nonCache:0x%x \n", ProtectPA, (unsigned int)pProtectVA, (unsigned int)pProtectVA_nonCache); //initialize global variables pRangeDes = kmalloc(sizeof(M4U_RANGE_DES_T) * TOTAL_RANGE_NUM, GFP_KERNEL|__GFP_ZERO); if(NULL==pRangeDes) { M4UMSG("Physical memory not available.\n"); return false; } pWrapDes = kmalloc(sizeof(M4U_WRAP_DES_T) * TOTAL_WRAP_NUM, GFP_KERNEL|__GFP_ZERO); if(NULL==pWrapDes) { M4UMSG("Physical memory not available.\n"); return false; } pM4URegBackUp = (unsigned int*)kmalloc(BACKUP_REG_SIZE, GFP_KERNEL|__GFP_ZERO); if(pM4URegBackUp==NULL) { M4UERR("pM4URegBackUp kmalloc fail \n"); } m4u_hw_init(); gM4uLogFlag = false; return 0; } /** * @brief , system power on / return from power resume * @param * @return */ static int m4u_hw_init(void) { unsigned int i; unsigned regval; M4UDBG("m4u_hw_init() \n"); m4u_clock_on(); //============================================= // SMI registers //============================================= //bus sel /* regval = F_SMI_BUS_SEL_larb0(larb_2_m4u_id(0)) \ |F_SMI_BUS_SEL_larb1(larb_2_m4u_id(1)) \ |F_SMI_BUS_SEL_larb2(larb_2_m4u_id(2)) \ |F_SMI_BUS_SEL_larb3(larb_2_m4u_id(3)) \ |F_SMI_BUS_SEL_larb4(larb_2_m4u_id(4)) ; M4UDBG("regval = 0x%x\n", regval); COM_WriteReg32(REG_SMI_BUS_SEL, regval); M4UMSG("bus = 0x%x\n", COM_ReadReg32(REG_SMI_BUS_SEL)); */ // secure register: // all use physical (bypass m4u); domain(3); secure(0) for(i=0; i<7; i++) COM_WriteReg32(REG_SMI_SECUR_CON(i), 0x66666666); //============================================= // m4u global registers 
//============================================ //set m4u pagetable base address COM_WriteReg32(REG_MMUg_PT_BASE, (unsigned int)pt_pa_nonsec); COM_WriteReg32(REG_MMUg_PT_BASE_SEC, (unsigned int)pt_pa_sec); COM_WriteReg32(REG_MMUg_DCM, F_MMUg_DCM_ON(1)); //============================================= // L2 registers //============================================= if(gM4U_L2_enable) { regval = F_L2_GDC_BYPASS(0); } else { regval = F_L2_GDC_BYPASS(1); } /*|F_L2_GDC_LOCK_ALERT_DIS(0) \ |F_L2_GDC_LOCK_TH(3) \ */ regval |= F_L2_GDC_PERF_MASK(GDC_PERF_MASK_HIT_MISS) \ |F_L2_GDC_PAUSE_OP(GDC_NO_PAUSE); COM_WriteReg32(REG_L2_GDC_OP, regval); //=============================== // LARB //=============================== { int i; for(i=0; i<SMI_LARB_NR; i++) { larb_clock_on(i); //set SMI_SHARE_EN to 0 M4U_WriteReg32(gLarbBaseAddr[i], SMI_SHARE_EN, 0x0); //set SMI_ROUTE_SEL to 1 // M4U_WriteReg32(gLarbBaseAddr[i], SMI_ROUTE_SEL, 0xffffffff); M4UMSG("larb clock on %d\n", i); larb_clock_off(i); } } //============================================= // m4u registers //============================================= //for(i=0;i<TOTAL_M4U_NUM;i++) i=0; { regval = F_MMU_CTRL_PFH_DIS(0) \ |F_MMU_CTRL_TLB_WALK_DIS(0) \ |F_MMU_CTRL_MONITOR_EN(0) \ |F_MMU_CTRL_MONITOR_CLR(0) \ |F_MMU_CTRL_PFH_RT_RPL_MODE(0) \ |F_MMU_CTRL_TF_PROT_VAL(2) \ |F_MMU_CTRL_COHERE_EN(1) ; if(g_debug_enable_error_hang) regval |= F_MMU_CTRL_INT_HANG_en(1); M4U_WriteReg32(gM4UBaseAddr[i], REG_MMU_CTRL_REG, regval); // M4UMSG("ctl = 0x%x\n", M4U_ReadReg32(gM4UBaseAddr[i], REG_MMU_CTRL_REG)); //enable interrupt control except "Same VA-to-PA test" M4U_WriteReg32(gM4UBaseAddr[i], REG_MMU_INT_CONTROL, 0x7F); //disable non-blocking mode //M4U_WriteReg32(gM4UBaseAddr[i], REG_MMU_NON_BLOCKING_DIS, F_MMU_NON_BLOCK_DISABLE_BIT); M4U_WriteReg32(gM4UBaseAddr[i], REG_MMU_IVRP_PADDR, (unsigned int)ProtectPA); if(g_debug_dump_rs_in_isr) m4u_monitor_start(i); M4UDBG("init hw OK: %d \n",i); } //invalidate all TLB entry 
    /* flush every TLB entry so the freshly-written page table takes effect */
    m4u_invalid_tlb_all(M4U_ID_ALL, gM4U_L2_enable);

    /* default port configuration for the MDP ports: virtual (M4U-translated),
     * domain 3, non-secure */
    M4U_PORT_STRUCT port;
    port.Direction = 0;
    port.Distance = 1;
    port.domain = 3;
    port.Security = 0;
    port.Virtuality = 1;

    port.ePortID = MDP_RDMA;
    m4u_config_port(&port);
    port.ePortID = MDP_WDMA;
    m4u_config_port(&port);
    port.ePortID = MDP_ROTO;
    m4u_config_port(&port);
    port.ePortID = MDP_ROTCO;
    m4u_config_port(&port);
    port.ePortID = MDP_ROTVO;
    m4u_config_port(&port);

    return 0;
}

/* Acknowledge a pending M4U interrupt: read-modify-write the interrupt
 * control register with the clear bit set. */
static void m4u_clear_intr(unsigned int m4u_base)
{
    unsigned int Temp;
    Temp = M4U_ReadReg32(m4u_base, REG_MMU_INT_CONTROL) | F_INT_CLR_BIT;
    M4U_WriteReg32(m4u_base, REG_MMU_INT_CONTROL, Temp);
}

/* Save M4U/SMI/L2 registers into pM4URegBackUp (used across suspend).
 * NOTE(review): smi_reg_restore() replays this buffer positionally, so the
 * read order here must stay exactly in sync with the write order there. */
static int smi_reg_backup(void)
{
    unsigned int* pReg = pM4URegBackUp;
    int m4u_id;
    int i;

    spin_lock(&gM4u_reg_lock);

    //flag (for debug)
    *(pReg++) = COM_ReadReg32(REG_MMUg_PT_BASE);

    //m4u reg backup
    for(m4u_id=0; m4u_id<2; m4u_id++)
    {
        unsigned int m4u_base = gM4UBaseAddr[m4u_id];

        /* sequence-range registers */
        for(i=0; i<M4U_SEQ_NR; i++)
        {
            *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_SQ_START(i));
            *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_SQ_END(i));
        }

        /* prefetch distance / direction registers */
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIST0);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIST1);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIST2);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIST3);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIST4);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIST5);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIST6);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIST16_0);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DISTS16_1);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIR0);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFH_DIR1);

        /* control / translation-fault registers */
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_CTRL_REG);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_IVRP_PADDR);
        *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_INT_CONTROL);

        /* wrap-window registers */
        for(i=0; i<M4U_WRAP_NR; i++)
        {
            *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_WRAP_SA(i));
            *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_WRAP_EA(i));
        }
        *(pReg++) =
M4U_ReadReg32(m4u_base, REG_MMU_WRAP_EN0); *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_WRAP_EN1); *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_WRAP_EN2); *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_WRAP_EN3); *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_WRAP_EN4); *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_WRAP_EN5); *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_WRAP_EN6); *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_PFQ_BROADCAST_EN); *(pReg++) = M4U_ReadReg32(m4u_base, REG_MMU_NON_BLOCKING_DIS); } //M4U top registers *(pReg++) = COM_ReadReg32(REG_MMUg_CTRL); *(pReg++) = COM_ReadReg32(REG_MMUg_PT_BASE); *(pReg++) = COM_ReadReg32(REG_MMUg_DCM); *(pReg++) = COM_ReadReg32(REG_MMUg_CTRL_SEC); *(pReg++) = COM_ReadReg32(REG_MMUg_PT_BASE_SEC); //L2 cache registers *(pReg++) = COM_ReadReg32(REG_L2_GDC_OP); *(pReg++) = COM_ReadReg32(REG_L2_GDC_PERF0); *(pReg++) = COM_ReadReg32(REG_L2_GPE_STATUS_SEC); //SMI registers //*(pReg++) = COM_ReadReg32(REG_SMI_L1LEN); // *(pReg++) = COM_ReadReg32(REG_SMI_BUS_SEL); enable_mux(MT_MUX_MM, "m4u"); for(i=0; i<7; i++) *(pReg++) = COM_ReadReg32(REG_SMI_SECUR_CON(i)); disable_mux(MT_MUX_MM, "m4u"); spin_unlock(&gM4u_reg_lock); //M4UMSG("register backup buffer needs: %d \n", (unsigned int)pReg-(unsigned int)pM4URegBackUp); if(pt_pa_nonsec !=*pM4URegBackUp) { M4UERR("PT_BASE in memory is error after backup! 
expect PTPA=0x%x, backupReg=0x%x\n", pt_pa_nonsec, *pM4URegBackUp); } return 0; } static int smi_reg_restore(void) { unsigned int* pReg = pM4URegBackUp; int m4u_id; int i; spin_lock(&gM4u_reg_lock); //flag (for debug) COM_WriteReg32(REG_MMUg_PT_BASE, *(pReg++)); //m4u reg backup for(m4u_id=0; m4u_id<2; m4u_id++) { unsigned int m4u_base = gM4UBaseAddr[m4u_id]; for(i=0; i<M4U_SEQ_NR; i++) { M4U_WriteReg32(m4u_base, REG_MMU_SQ_START(i), *(pReg++)); M4U_WriteReg32(m4u_base, REG_MMU_SQ_END(i) , *(pReg++)); } M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIST0 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIST1 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIST2 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIST3 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIST4 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIST5 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIST6 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIST16_0 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DISTS16_1 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIR0 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFH_DIR1 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_CTRL_REG , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_IVRP_PADDR , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_INT_CONTROL , *(pReg++) ); for(i=0; i<M4U_WRAP_NR; i++) { M4U_WriteReg32(m4u_base, REG_MMU_WRAP_SA(i), *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_WRAP_EA(i), *(pReg++) ); } M4U_WriteReg32(m4u_base, REG_MMU_WRAP_EN0 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_WRAP_EN1 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_WRAP_EN2 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_WRAP_EN3 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_WRAP_EN4 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_WRAP_EN5 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_WRAP_EN6 , *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_PFQ_BROADCAST_EN, *(pReg++) ); M4U_WriteReg32(m4u_base, REG_MMU_NON_BLOCKING_DIS, 
*(pReg++) ); } //M4U top registers COM_WriteReg32(REG_MMUg_CTRL , *(pReg++) ); COM_WriteReg32(REG_MMUg_PT_BASE , *(pReg++) ); COM_WriteReg32(REG_MMUg_DCM , *(pReg++) ); COM_WriteReg32(REG_MMUg_CTRL_SEC , *(pReg++) ); COM_WriteReg32(REG_MMUg_PT_BASE_SEC , *(pReg++) ); //L2 cache registers COM_WriteReg32(REG_L2_GDC_OP , *(pReg++) ); COM_WriteReg32(REG_L2_GDC_PERF0 , *(pReg++) ); COM_WriteReg32(REG_L2_GPE_STATUS_SEC , *(pReg++) ); //SMI registers //COM_WriteReg32(REG_SMI_L1LEN, *(pReg++) ); // COM_WriteReg32(REG_SMI_BUS_SEL, *(pReg++) ); enable_mux(MT_MUX_MM, "m4u"); for(i=0; i<7; i++) COM_WriteReg32(REG_SMI_SECUR_CON(i), *(pReg++) ); disable_mux(MT_MUX_MM, "m4u"); if(COM_ReadReg32(REG_MMUg_PT_BASE) != pt_pa_nonsec) { M4UERR("PT_BASE is error after restore! 0x%x != 0x%x\n", COM_ReadReg32(REG_MMUg_PT_BASE), pt_pa_nonsec); } spin_unlock(&gM4u_reg_lock); return 0; } static char* m4u_get_port_name(M4U_PORT_ID_ENUM portID) { switch(portID) { case DISP_OVL_0 : return "DISP_OVL_0" ; case DISP_RDMA : return "DISP_RDMA" ; case DISP_WDMA : return "DISP_WDMA" ; case MM_CMDQ : return "MM_CMDQ" ; case MDP_RDMA : return "MDP_RDMA" ; case MDP_WDMA : return "MDP_WDMA" ; case MDP_ROTO : return "MDP_ROTO" ; case MDP_ROTCO : return "MDP_ROTCO" ; case MDP_ROTVO : return "MDP_ROTVO" ; case VDEC_MC_EXT : return "VDEC_MC_EXT" ; case VDEC_PP_EXT : return "VDEC_PP_EXT" ; case VDEC_AVC_MV_EXT : return "VDEC_AVC_MV_EXT" ; case VDEC_PRED_RD_EXT : return "VDEC_PRED_RD_EXT" ; case VDEC_PRED_WR_EXT : return "VDEC_PRED_WR_EXT" ; case VDEC_VLD_EXT : return "VDEC_VLD_EXT" ; case VDEC_PP_INT : return "VDEC_PP_INT" ; case CAM_IMGO : return "CAM_IMGO" ; case CAM_IMG2O : return "CAM_IMG2O" ; case CAM_LSCI : return "CAM_LSCI" ; case CAM_IMGI : return "CAM_IMGI" ; case CAM_ESFKO : return "CAM_ESFKO" ; case CAM_AAO : return "CAM_AAO" ; case JPGENC_RDMA : return "JPGENC_RDMA" ; case JPGENC_BSDMA : return "JPGENC_BSDMA" ; case VENC_RD_COMV : return "VENC_RD_COMV" ; case VENC_SV_COMV : return "VENC_SV_COMV" ; 
case VENC_RCPU : return "VENC_RCPU" ; case VENC_REC_FRM : return "VENC_REC_FRM" ; case VENC_REF_LUMA : return "VENC_REF_LUMA" ; case VENC_REF_CHROMA : return "VENC_REF_CHROMA" ; case VENC_BSDMA : return "VENC_BSDMA" ; case VENC_CUR_LUMA : return "VENC_CUR_LUMA" ; case VENC_CUR_CHROMA : return "VENC_CUR_CHROMA" ; case M4U_PORT_UNKNOWN : return "UNKNOWN"; default: M4UMSG("invalid module id=%d", portID); return "UNKNOWN"; } } static char* m4u_get_module_name(M4U_MODULE_ID_ENUM moduleID) { return m4u_get_port_name(moduleID); } static void m4u_memory_usage(bool bPrintAll) { unsigned int i=0; for(i=0;i<M4U_CLIENT_MODULE_NUM;i++) { M4UMSG("id=%-2d, name=%-10s, max=%-5dKB, current=%-5dKB, locked_page=%-3d \n", i, m4u_get_module_name(i), pmodule_max_size[i]/1024, pmodule_current_size[i]/1024, pmodule_locked_pages[i]); } } static void m4u_print_active_port(unsigned int m4u_index) { unsigned int i=0; unsigned int regval; M4UINFO("active ports: "); { for(i=0;i<M4U_PORT_NR;i++) { enable_mux(MT_MUX_MM, "m4u"); regval = m4uHw_get_field_by_mask(0, REG_SMI_SECUR_CON_OF_PORT(i), F_SMI_SECUR_CON_VIRTUAL(i)); disable_mux(MT_MUX_MM, "m4u"); if(regval) { printk(KERN_INFO"%s(%d), ", m4u_get_port_name(i),i); } } printk(KERN_INFO"\n"); } } static int m4u_dump_seq_range_info(void) { unsigned int i=0; M4UMSG(" MVA Range Info: \n"); for(i=0;i<TOTAL_RANGE_NUM;i++) { if(1==pRangeDes[i].Enabled) { M4UMSG("pRangeDes[%d]: Enabled=%d, module=%s, MVAStart=0x%x, MVAEnd=0x%x, entrycount=%d \n", i, pRangeDes[i].Enabled, m4u_get_module_name(pRangeDes[i].eModuleID), pRangeDes[i].MVAStart, pRangeDes[i].MVAEnd, pRangeDes[i].entryCount); } } return 0; } int m4u_do_dump_info(int m4u_index) { unsigned int i=0; M4UMSG(" MVA Range Info: \n"); m4u_dump_seq_range_info(); M4UMSG(" Wrap Range Info: \n"); for(i=0;i<TOTAL_WRAP_NUM;i++) { if(1==pWrapDes[i].Enabled) { M4UMSG("pWrapDes[%d]: Enabled=%d, module=%s, MVAStart=0x%x, MVAEnd=0x%x \n", i, pWrapDes[i].Enabled, m4u_get_port_name(pWrapDes[i].eModuleID), 
pWrapDes[i].MVAStart, pWrapDes[i].MVAEnd); } } m4u_dump_mva_info(); return 0; } int m4u_do_log_on(void) { unsigned int i=0; M4UMSG("m4u_log_on is called! \n"); gM4uLogFlag = true; m4u_dump_mva_info(); m4u_memory_usage(true); for(i=0;i<TOTAL_M4U_NUM;i++) { m4u_dump_info(i); m4u_print_active_port(i); } M4UMSG("m4u pagetable info: \n"); m4u_mvaGraph_dump(); return 0; } int m4u_do_log_off(void) { M4UMSG("m4u_log_off is called! \n"); gM4uLogFlag = false; return 0; } static int m4u_enable_prefetch(M4U_PORT_ID_ENUM PortID) { unsigned int m4u_base = gM4UBaseAddr[m4u_port_2_m4u_id(PortID)]; m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG, F_MMU_CTRL_PFH_DIS(1), F_MMU_CTRL_PFH_DIS(0)); return 0; } static int m4u_disable_prefetch(M4U_PORT_ID_ENUM PortID) { unsigned int m4u_base = gM4UBaseAddr[m4u_port_2_m4u_id(PortID)]; m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG, F_MMU_CTRL_PFH_DIS(1), F_MMU_CTRL_PFH_DIS(1)); return 0; } static int m4u_enable_error_hang(int m4u_id) { unsigned int m4u_base = gM4UBaseAddr[m4u_id]; m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG, F_MMU_CTRL_INT_HANG_en(1), F_MMU_CTRL_INT_HANG_en(1)); return 0; } static int m4u_disable_error_hang(int m4u_id) { unsigned int m4u_base = gM4UBaseAddr[m4u_id]; m4uHw_set_field_by_mask(m4u_base, REG_MMU_CTRL_REG, F_MMU_CTRL_INT_HANG_en(1), F_MMU_CTRL_INT_HANG_en(0)); return 0; } static int m4u_enable_L2_cache(void) { unsigned int regval; regval = F_L2_GDC_BYPASS(0) \ |F_L2_GDC_PERF_MASK(GDC_PERF_MASK_HIT_MISS) \ |F_L2_GDC_PAUSE_OP(GDC_NO_PAUSE); COM_WriteReg32(REG_L2_GDC_OP, regval); gM4U_L2_enable = 1; return 0; } static int m4u_disable_L2_cache(void) { unsigned int regval; regval = F_L2_GDC_BYPASS(1) \ |F_L2_GDC_PERF_MASK(GDC_PERF_MASK_HIT_MISS) \ |F_L2_GDC_PAUSE_OP(GDC_NO_PAUSE); COM_WriteReg32(REG_L2_GDC_OP, regval); gM4U_L2_enable = 0; return 0; } static int m4u_L2_enable_lock_alert(void) { return 0; } static int m4u_l2_disable_lock_alert(void) { return 0; } static int 
m4u_dump_user_addr_register(M4U_PORT_ID_ENUM port) { return 0; } int m4u_do_mva_map_kernel(unsigned int mva, unsigned int size, int sec, unsigned int* map_va, unsigned int* map_size) { struct page **pages; unsigned int page_num, map_page_num; unsigned int kernel_va, kernel_size; kernel_va = 0; kernel_size = 0; page_num = M4U_GET_PAGE_NUM(mva, size); pages = vmalloc(sizeof(struct page*)*page_num); if(pages == NULL) { M4UMSG("mva_map_kernel: error to vmalloc for %d\n", sizeof(struct page*)*page_num); } for(map_page_num=0; map_page_num<page_num; map_page_num++) { unsigned int pa; if(sec) pa = *(unsigned int*)mva_pteAddr_sec(mva+map_page_num*M4U_PAGE_SIZE); else pa = *(unsigned int*)mva_pteAddr_nonsec(mva+map_page_num*M4U_PAGE_SIZE); if((pa&F_DESC_VALID) != F_DESC_VALID) { break; } pages[map_page_num] = phys_to_page(pa); } if(map_page_num != page_num) { M4UMSG("mva_map_kernel: only get %d pages: mva=0x%x, size=0x%x\n", map_page_num, mva, size); goto error_out; } kernel_va = (unsigned int)vmap(pages, map_page_num, VM_MAP, PAGE_KERNEL); if(kernel_va == 0) { M4UMSG("mva_map_kernel: vmap fail: page_num=%d\n", map_page_num); goto error_out; } kernel_va += mva & (M4U_PAGE_MASK); *map_va = kernel_va; *map_size = size; error_out: vfree(pages); M4ULOG("mva_map_kernel: mva=0x%x,size=0x%x,sec=0x%x,map_va=0x%x,map_size=0x%x\n", mva, size, sec, *map_va, *map_size); return 0; } int m4u_do_mva_unmap_kernel(unsigned int mva, unsigned int size, unsigned int va) { M4ULOG("mva_unmap_kernel: mva=0x%x,size=0x%x,va=0x%x\n", mva, size, va); vunmap((void*)(va&(~M4U_PAGE_MASK))); return 0; } int m4u_do_debug_command(unsigned int command) { M4UMSG("m4u_debug_command, command=0x%x \n", command); switch(command) { case 0: g_debug_make_translation_fault = 0; break; case 1: g_debug_make_translation_fault = 1; break; case 2: g_debug_print_detail_in_isr = 0; break; case 3: g_debug_print_detail_in_isr = 1; break; case 4: m4u_enable_error_hang(0); g_debug_enable_error_hang= 1; break; case 5: 
m4u_disable_error_hang(0); g_debug_enable_error_hang= 0; break; case 6: g_debug_recover_pagetable_TF = 0; break; case 7: //start dynamic profile g_debug_recover_pagetable_TF = 1; break; case 8: // get profile report { //int i; //for(i=0;i<TOTAL_M4U_NUM;i++) //{ // m4u_monitor_start(m4u_get_port_by_index(i)); // start to count performance for next 1 second //} } break; case 9: //stop profile and get report { //int i; //for(i=0;i<TOTAL_M4U_NUM;i++) //{ // m4u_monitor_stop(m4u_get_port_by_index(i)); // print performance in last 1 second //} } break; case 10: m4u_monitor_start(0); m4u_monitor_start(1); g_debug_dump_rs_in_isr=1; break; case 11: m4u_monitor_stop(0); m4u_monitor_stop(1); g_debug_dump_rs_in_isr=0; break; case 12: M4UMSG("debug 12: dump mva info\n"); m4u_dump_mva_info(); break; case 13: M4UMSG("debug 13: L1 enable cache flush all\n"); L1_CACHE_SYNC_BY_RANGE_ONLY = 0; break; case 14: M4UMSG("debug 14: L1 cache flush by range only\n"); L1_CACHE_SYNC_BY_RANGE_ONLY = 1; break; case 15: M4UMSG("debug 15: set level to user \n"); gTestLevel = M4U_TEST_LEVEL_USER; break; case 16: M4UMSG("debug 16: set level to eng \n"); gTestLevel = M4U_TEST_LEVEL_ENG; break; case 17: M4UMSG("debug 17: set level to stress \n"); gTestLevel = M4U_TEST_LEVEL_STRESS; break; case 18: break; case 0xffffffff: break; default: M4UMSG("undefined command! 
\n"); } return 0; } static void m4u_print_mva_list(struct file *filep, const char *pMsg) { garbage_node_t *pNode = filep->private_data; mva_info_t *pList; struct list_head *pListHead; M4UMSG("print mva list [%s] ================================>\n", pMsg); mutex_lock(&(pNode->dataMutex)); list_for_each(pListHead, &(pNode->mvaList)) { pList = container_of(pListHead, mva_info_t, link); M4UMSG("module=%s, va=0x%x, size=0x%x, mva=0x%x, flags=%d\n", m4u_get_module_name(pList->eModuleId), pList->bufAddr, pList->size, pList->mvaStart, pList->flags); } mutex_unlock(&(pNode->dataMutex)); M4UMSG("print mva list done ==========================>\n"); } mva_info_t* m4u_alloc_garbage_list( unsigned int mvaStart, unsigned int bufSize, M4U_MODULE_ID_ENUM eModuleID, unsigned int va, unsigned int flags, int security, int cache_coherent) { mva_info_t *pList = NULL; pList = (mva_info_t*)kmalloc(sizeof(mva_info_t), GFP_KERNEL); if(pList==NULL) { M4UERR("m4u_add_to_garbage_list(), pList=0x%x\n", (unsigned int)pList); return NULL; } pList->mvaStart = mvaStart; pList->size = bufSize; pList->eModuleId = eModuleID; pList->bufAddr = va; pList->flags = flags; pList->security = security; pList->cache_coherent = cache_coherent; return pList; } static int m4u_free_garbage_list(mva_info_t *pList) { kfree(pList); return 0; } static int m4u_add_to_garbage_list(struct file * a_pstFile,mva_info_t *pList) { garbage_node_t *pNode = (garbage_node_t*)(a_pstFile->private_data); mutex_lock(&(pNode->dataMutex)); list_add(&(pList->link), &(pNode->mvaList)); mutex_unlock(&(pNode->dataMutex)); return 0; } static mva_info_t* m4u_delete_from_garbage_list(M4U_MOUDLE_STRUCT* p_m4u_module, struct file * a_pstFile) { struct list_head *pListHead; mva_info_t *pList = NULL; garbage_node_t *pNode = (garbage_node_t*)(a_pstFile->private_data); mva_info_t* ret=NULL; if(pNode==NULL) { M4UERR("m4u_delete_from_garbage_list(), pNode is NULL! 
\n"); return NULL; } mutex_lock(&(pNode->dataMutex)); list_for_each(pListHead, &(pNode->mvaList)) { pList = container_of(pListHead, mva_info_t, link); if((pList->mvaStart== p_m4u_module->MVAStart)) { if( (pList->bufAddr== p_m4u_module->BufAddr) && (pList->size == p_m4u_module->BufSize) && (pList->eModuleId == p_m4u_module->eModuleID) ) { list_del(pListHead); ret = pList; break; } else { ret=NULL; M4UMSG("error: input argument isn't valid, can't find the node at garbage list\n"); } } } if(pListHead == &(pNode->mvaList)) { ret=NULL; M4UMSG("error: input argument isn't valid, can't find the node at garbage list\n"); } mutex_unlock(&(pNode->dataMutex)); return ret; } static int m4u_suspend(struct platform_device *pdev, pm_message_t mesg) { smi_reg_backup(); M4ULOG("SMI backup in suspend \n"); return 0; } static int m4u_resume(struct platform_device *pdev) { smi_reg_restore(); M4UMSG("SMI restore in resume \n"); return 0; } /*---------------------------------------------------------------------------*/ #ifdef CONFIG_PM /*---------------------------------------------------------------------------*/ static int m4u_pm_suspend(struct device *device) { struct platform_device *pdev = to_platform_device(device); BUG_ON(pdev == NULL); return m4u_suspend(pdev, PMSG_SUSPEND); } static int m4u_pm_resume(struct device *device) { struct platform_device *pdev = to_platform_device(device); BUG_ON(pdev == NULL); return m4u_resume(pdev); } extern void mt_irq_set_sens(unsigned int irq, unsigned int sens); extern void mt_irq_set_polarity(unsigned int irq, unsigned int polarity); static int m4u_pm_restore_noirq(struct device *device) { M4ULOG("calling %s()\n", __func__); // m4u related irqs mt_irq_set_sens(MT6589_MMU0_IRQ_ID, MT_LEVEL_SENSITIVE); mt_irq_set_polarity(MT6589_MMU0_IRQ_ID, MT_POLARITY_LOW); mt_irq_set_sens(MT6589_MMU1_IRQ_ID, MT_LEVEL_SENSITIVE); mt_irq_set_polarity(MT6589_MMU1_IRQ_ID, MT_POLARITY_LOW); mt_irq_set_sens(MT6589_MMU_L2_IRQ_ID, MT_LEVEL_SENSITIVE); 
mt_irq_set_polarity(MT6589_MMU_L2_IRQ_ID, MT_POLARITY_LOW); mt_irq_set_sens(MT6589_MMU_L2_SEC_IRQ_ID, MT_LEVEL_SENSITIVE); mt_irq_set_polarity(MT6589_MMU_L2_SEC_IRQ_ID, MT_POLARITY_LOW); return 0; } /*---------------------------------------------------------------------------*/ #else /*CONFIG_PM*/ /*---------------------------------------------------------------------------*/ #define m4u_pm_suspend NULL #define m4u_pm_resume NULL #define m4u_pm_restore_noirq NULL /*---------------------------------------------------------------------------*/ #endif /*CONFIG_PM*/ /*---------------------------------------------------------------------------*/ struct dev_pm_ops m4u_pm_ops = { .suspend = m4u_pm_suspend, .resume = m4u_pm_resume, .freeze = m4u_pm_suspend, .thaw = m4u_pm_resume, .poweroff = m4u_pm_suspend, .restore = m4u_pm_resume, .restore_noirq = m4u_pm_restore_noirq, }; static struct platform_driver m4uDrv = { .probe = m4u_probe, .remove = m4u_remove, .suspend= m4u_suspend, .resume = m4u_resume, .driver = { .name = M4U_DEVNAME, #ifdef CONFIG_PM .pm = &m4u_pm_ops, #endif .owner = THIS_MODULE, } }; static int __init MTK_M4U_Init(void) { if(platform_driver_register(&m4uDrv)){ M4UMSG("failed to register MAU driver"); return -ENODEV; } return 0; } static void __exit MTK_M4U_Exit(void) { platform_driver_unregister(&m4uDrv); } int unused_for_build_warnings(void) { m4u_mvaGraph_dump_raw(); m4u_dump_pfh_tlb_tags(0); m4u_search_main_invalid(0); m4u_invalid_tlb_sec_by_range(0, 0, 0, 0); m4u_L2_prefetch(0, 0, 0); m4u_dump_rs_info(0); m4u_clock_off(); m4u_profile_init(); m4u_invalid_seq_all(0); m4u_L2_monitor_start(0); m4u_L2_monitor_stop(0); m4u_print_perf_counter(0, NULL); m4u_enable_prefetch(0); m4u_disable_prefetch(0); m4u_enable_L2_cache(); m4u_disable_L2_cache(); m4u_L2_enable_lock_alert(); m4u_l2_disable_lock_alert(); return 0; } module_init(MTK_M4U_Init); module_exit(MTK_M4U_Exit); MODULE_DESCRIPTION("MTK M4U driver"); MODULE_AUTHOR("MTK81044 <k.zhang@mediatek.com>"); 
MODULE_LICENSE("GPL");
gpl-2.0
lengxu/suricata
src/detect-engine-hscd.c
6
59967
/* Copyright (C) 2007-2010 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free * Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * version 2 along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ /** * \ingroup httplayer * * @{ */ /** * \file * * \author Anoop Saldanha <anoopsaldanha@gmail.com> */ #include "suricata-common.h" #include "suricata.h" #include "decode.h" #include "detect.h" #include "detect-engine.h" #include "detect-engine-mpm.h" #include "detect-engine-hscd.h" #include "detect-parse.h" #include "detect-engine-state.h" #include "detect-engine-content-inspection.h" #include "flow-util.h" #include "util-debug.h" #include "util-print.h" #include "flow.h" #include "stream-tcp.h" #include "app-layer-parser.h" #include "util-unittest.h" #include "util-unittest-helper.h" #include "app-layer.h" #include "app-layer-htp.h" #include "app-layer-protos.h" /** * \brief Run the mpm against http stat code. * * \retval cnt Number of matches reported by the mpm algo. */ int DetectEngineRunHttpStatCodeMpm(DetectEngineThreadCtx *det_ctx, Flow *f, HtpState *htp_state, uint8_t flags, void *txv, uint64_t idx) { SCEnter(); uint32_t cnt = 0; htp_tx_t *tx = (htp_tx_t *)txv; if (tx->response_status == NULL) goto end; cnt = HttpStatCodePatternSearch(det_ctx, (uint8_t *)bstr_ptr(tx->response_status), bstr_len(tx->response_status), flags); end: SCReturnInt(cnt); } /** * \brief Do the http_stat_code content inspection for a signature. * * \param de_ctx Detection engine context. 
 * \param det_ctx Detection engine thread context.
 * \param s Signature to inspect.
 * \param f Flow.
 * \param flags App layer flags.
 * \param state App layer state.
 *
 * \retval 0 No match.
 * \retval 1 Match.
 */
int DetectEngineInspectHttpStatCode(ThreadVars *tv, DetectEngineCtx *de_ctx,
                                    DetectEngineThreadCtx *det_ctx,
                                    Signature *s, Flow *f, uint8_t flags,
                                    void *alstate,
                                    void *txv, uint64_t tx_id)
{
    htp_tx_t *tx = (htp_tx_t *)txv;

    /* No status line yet: if parsing has already moved past the response
     * line it will never appear (CANT_MATCH), otherwise it may still
     * arrive in a later chunk (NO_MATCH). */
    if (tx->response_status == NULL) {
        if (AppLayerParserGetStateProgress(IPPROTO_TCP, ALPROTO_HTTP, tx, flags) >
            HTP_RESPONSE_LINE)
            return DETECT_ENGINE_INSPECT_SIG_CANT_MATCH;
        else
            return DETECT_ENGINE_INSPECT_SIG_NO_MATCH;
    }

    /* reset per-inspection scratch state before running the content engine */
    det_ctx->discontinue_matching = 0;
    det_ctx->buffer_offset = 0;
    det_ctx->inspection_recursion_counter = 0;

    int r = DetectEngineContentInspection(de_ctx, det_ctx, s,
                                          s->sm_lists[DETECT_SM_LIST_HSCDMATCH],
                                          f,
                                          (uint8_t *)bstr_ptr(tx->response_status),
                                          bstr_len(tx->response_status),
                                          0,
                                          DETECT_ENGINE_CONTENT_INSPECTION_MODE_HSCD,
                                          NULL);
    if (r == 1)
        return DETECT_ENGINE_INSPECT_SIG_MATCH;
    else
        /* the status-code buffer is final once present, so a miss can
         * never turn into a match on later data */
        return DETECT_ENGINE_INSPECT_SIG_CANT_MATCH;
}

/***********************************Unittests**********************************/

#ifdef UNITTESTS

/* Positive test: content:"200"; http_stat_code must not alert on the
 * request packet and must alert once the "HTTP/1.0 200" response line has
 * been parsed. */
static int DetectEngineHttpStatCodeTest01(void)
{
    TcpSession ssn;
    Packet *p1 = NULL;
    Packet *p2 = NULL;
    ThreadVars th_v;
    DetectEngineCtx *de_ctx = NULL;
    DetectEngineThreadCtx *det_ctx = NULL;
    HtpState *http_state = NULL;
    Flow f;
    uint8_t http_buf1[] =
        "GET /index.html HTTP/1.0\r\n"
        "Host: www.openinfosecfoundation.org\r\n"
        "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n"
        "\r\n";
    uint32_t http_len1 = sizeof(http_buf1) - 1;
    uint8_t http_buf2[] =
        "HTTP/1.0 200 message\r\n"
        "Content-Type: text/html\r\n"
        "Content-Length: 7\r\n"
        "\r\n"
        "message";
    uint32_t http_len2 = sizeof(http_buf2) - 1;
    int result = 0;
    AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc();

    memset(&th_v, 0, sizeof(th_v));
    memset(&f, 0, sizeof(f));
    memset(&ssn, 0,
sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:\"200\"; http_stat_code; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if ((PacketAlertCheck(p1, 1))) { printf("sid 1 matched but shouldn't have\n"); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!(PacketAlertCheck(p2, 1))) { printf("sid 1 didn't match but should have"); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != 
NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest02(void) { TcpSession ssn; Packet *p1 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 2000123 xxxxABC\r\n" "Content-Type: text/html\r\n" "Content-Length: 7\r\n" "\r\n" "xxxxABC"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOCLIENT; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:\"123\"; http_stat_code; offset:4; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, 
http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (!(PacketAlertCheck(p1, 1))) { printf("sid 1 didn't match but should have\n"); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); return result; } static int DetectEngineHttpStatCodeTest03(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; int result = 0; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 123"; uint32_t http_len2 = sizeof(http_buf2) - 1; uint8_t http_buf3[] = "456789\r\n" "Content-Type: text/html\r\n" "Content-Length: 17\r\n" "\r\n" "12345678901234ABC"; uint32_t http_len3 = sizeof(http_buf3) - 1; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; 
p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:\"789\"; http_stat_code; offset:5; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have\n"); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf3, http_len3); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!(PacketAlertCheck(p2, 1))) { printf("sid 1 didn't match but should have"); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); 
UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest04(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:!\"200\"; http_stat_code; offset:3; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); 
http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!PacketAlertCheck(p2, 1)) { printf("sid 1 didn't match but should have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest05(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; 
p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:\"200\"; http_stat_code; depth:3; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!PacketAlertCheck(p2, 1)) { printf("sid 1 didn't match but should have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int 
DetectEngineHttpStatCodeTest06(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:!\"123\"; http_stat_code; depth:3; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no 
http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!PacketAlertCheck(p2, 1)) { printf("sid 1 didn't match but should have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest07(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= 
FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:!\"123\"; http_stat_code; offset:3; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (PacketAlertCheck(p2, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest08(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; 
ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:!\"200\"; http_stat_code; depth:3; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, 
det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (PacketAlertCheck(p2, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest09(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags 
|= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:\"200\"; http_stat_code; depth:3; " "content:\"123\"; http_stat_code; within:3; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!PacketAlertCheck(p2, 1)) { printf("sid 1 didn't match but should have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest10(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; 
DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:\"200\"; http_stat_code; depth:3; " "content:!\"124\"; http_stat_code; within:3; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, 
det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!PacketAlertCheck(p2, 1)) { printf("sid 1 didn't match but should have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest11(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; 
p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:\"200\"; http_stat_code; depth:3; " "content:\"124\"; http_stat_code; within:3; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (PacketAlertCheck(p2, 1)) { printf("sid 1 did match but should not have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest12(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx 
= NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:\"20\"; http_stat_code; depth:2; " "content:\"23\"; http_stat_code; distance:2; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, 
de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!PacketAlertCheck(p2, 1)) { printf("sid 1 did not match but should have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest13(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; 
p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "content:\"20\"; http_stat_code; depth:3; " "content:!\"25\"; http_stat_code; distance:2; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!PacketAlertCheck(p2, 1)) { printf("sid 1 did not match but should have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest14(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx 
*de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "pcre:/20/S; " "content:\"23\"; http_stat_code; distance:2; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if 
(PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!PacketAlertCheck(p2, 1)) { printf("sid 1 did not match but should have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } static int DetectEngineHttpStatCodeTest15(void) { TcpSession ssn; Packet *p1 = NULL; Packet *p2 = NULL; ThreadVars th_v; DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = NULL; HtpState *http_state = NULL; Flow f; uint8_t http_buf1[] = "GET /index.html HTTP/1.0\r\n" "Host: www.openinfosecfoundation.org\r\n" "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7\r\n" "\r\n"; uint32_t http_len1 = sizeof(http_buf1) - 1; uint8_t http_buf2[] = "HTTP/1.0 200123 abcdef\r\n" "Content-Type: text/html\r\n" "Content-Length: 6\r\n" "\r\n" "abcdef"; uint32_t http_len2 = sizeof(http_buf2) - 1; int result = 0; AppLayerParserThreadCtx *alp_tctx = AppLayerParserThreadCtxAlloc(); memset(&th_v, 0, sizeof(th_v)); memset(&f, 0, sizeof(f)); memset(&ssn, 0, sizeof(ssn)); p1 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); p2 = UTHBuildPacket(NULL, 0, IPPROTO_TCP); FLOW_INITIALIZE(&f); f.protoctx = (void *)&ssn; f.proto = IPPROTO_TCP; f.flags |= FLOW_IPV4; p1->flow = &f; p1->flowflags |= FLOW_PKT_TOSERVER; p1->flowflags |= FLOW_PKT_ESTABLISHED; p1->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; p2->flow = &f; p2->flowflags |= 
FLOW_PKT_TOCLIENT; p2->flowflags |= FLOW_PKT_ESTABLISHED; p2->flags |= PKT_HAS_FLOW|PKT_STREAM_EST; f.alproto = ALPROTO_HTTP; StreamTcpInitConfig(TRUE); de_ctx = DetectEngineCtxInit(); if (de_ctx == NULL) goto end; de_ctx->flags |= DE_QUIET; de_ctx->sig_list = SigInit(de_ctx,"alert http any any -> any any " "(msg:\"http stat code test\"; " "pcre:/200/S; " "content:!\"124\"; http_stat_code; distance:0; within:3; " "sid:1;)"); if (de_ctx->sig_list == NULL) goto end; SigGroupBuild(de_ctx); DetectEngineThreadCtxInit(&th_v, (void *)de_ctx, (void *)&det_ctx); SCMutexLock(&f.m); int r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOSERVER, http_buf1, http_len1); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: ", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); http_state = f.alstate; if (http_state == NULL) { printf("no http state: \n"); result = 0; goto end; } /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p1); if (PacketAlertCheck(p1, 1)) { printf("sid 1 matched but shouldn't have: "); goto end; } SCMutexLock(&f.m); r = AppLayerParserParse(alp_tctx, &f, ALPROTO_HTTP, STREAM_TOCLIENT, http_buf2, http_len2); if (r != 0) { printf("toserver chunk 1 returned %" PRId32 ", expected 0: \n", r); result = 0; SCMutexUnlock(&f.m); goto end; } SCMutexUnlock(&f.m); /* do detect */ SigMatchSignatures(&th_v, de_ctx, det_ctx, p2); if (!PacketAlertCheck(p2, 1)) { printf("sid 1 did not match but should have: "); goto end; } result = 1; end: if (alp_tctx != NULL) AppLayerParserThreadCtxFree(alp_tctx); if (de_ctx != NULL) SigGroupCleanup(de_ctx); if (de_ctx != NULL) SigCleanSignatures(de_ctx); if (de_ctx != NULL) DetectEngineCtxFree(de_ctx); StreamTcpFreeConfig(TRUE); FLOW_DESTROY(&f); UTHFreePackets(&p1, 1); UTHFreePackets(&p2, 1); return result; } #endif /* UNITTESTS */ void DetectEngineHttpStatCodeRegisterTests(void) { #ifdef UNITTESTS UtRegisterTest("DetectEngineHttpStatCodeTest01", DetectEngineHttpStatCodeTest01, 1); 
UtRegisterTest("DetectEngineHttpStatCodeTest02", DetectEngineHttpStatCodeTest02, 1); UtRegisterTest("DetectEngineHttpStatCodeTest03", DetectEngineHttpStatCodeTest03, 1); UtRegisterTest("DetectEngineHttpStatCodeTest04", DetectEngineHttpStatCodeTest04, 1); UtRegisterTest("DetectEngineHttpStatCodeTest05", DetectEngineHttpStatCodeTest05, 1); UtRegisterTest("DetectEngineHttpStatCodeTest06", DetectEngineHttpStatCodeTest06, 1); UtRegisterTest("DetectEngineHttpStatCodeTest07", DetectEngineHttpStatCodeTest07, 1); UtRegisterTest("DetectEngineHttpStatCodeTest08", DetectEngineHttpStatCodeTest08, 1); UtRegisterTest("DetectEngineHttpStatCodeTest09", DetectEngineHttpStatCodeTest09, 1); UtRegisterTest("DetectEngineHttpStatCodeTest10", DetectEngineHttpStatCodeTest10, 1); UtRegisterTest("DetectEngineHttpStatCodeTest11", DetectEngineHttpStatCodeTest11, 1); UtRegisterTest("DetectEngineHttpStatCodeTest12", DetectEngineHttpStatCodeTest12, 1); UtRegisterTest("DetectEngineHttpStatCodeTest13", DetectEngineHttpStatCodeTest13, 1); UtRegisterTest("DetectEngineHttpStatCodeTest14", DetectEngineHttpStatCodeTest14, 1); UtRegisterTest("DetectEngineHttpStatCodeTest15", DetectEngineHttpStatCodeTest15, 1); #endif /* UNITTESTS */ return; } /** * @} */
gpl-2.0
redstar3894/android-gcc
gcc/testsuite/gcc.target/i386/sse3-addsubpd.c
6
1975
/* { dg-do run } */
/* { dg-options "-O2 -msse3 -mfpmath=sse" } */

#ifndef CHECK_H
#define CHECK_H "sse3-check.h"
#endif

#ifndef TEST
#define TEST sse3_test
#endif

#include CHECK_H

#include <pmmintrin.h>

/* Run ADDSUBPD on two unaligned double pairs and store the result
 * (unaligned). ADDSUBPD computes { a0 - b0, a1 + b1 }. */
static void
sse3_test_addsubpd (double *i1, double *i2, double *r)
{
  __m128d t1 = _mm_loadu_pd (i1);
  __m128d t2 = _mm_loadu_pd (i2);

  t1 = _mm_addsub_pd (t1, t2);

  _mm_storeu_pd (r, t1);
}

/* Same operation but with aligned loads, so the compiler may subsume the
 * load into the ADDSUBPD memory operand. */
static void
sse3_test_addsubpd_subsume (double *i1, double *i2, double *r)
{
  __m128d t1 = _mm_load_pd (i1);
  __m128d t2 = _mm_load_pd (i2);

  t1 = _mm_addsub_pd (t1, t2);

  _mm_storeu_pd (r, t1);
}

/* Compare two double pairs element-wise; return the number of mismatches. */
static int
chk_pd (double *v1, double *v2)
{
  int i;
  int n_fails = 0;

  for (i = 0; i < 2; i++)
    if (v1[i] != v2[i])
      n_fails += 1;

  return n_fails;
}

/* Operand and result buffers; p1/p2 are 16-byte aligned so the subsume
 * variant's aligned loads are valid. */
static double p1[2] __attribute__ ((aligned(16)));
static double p2[2] __attribute__ ((aligned(16)));
static double p3[2];
static double ck[2];

/* 80 input values, consumed four at a time (two per operand pair). */
double vals[80] =
  {
    100.0,  200.0, 300.0, 400.0, 5.0, -1.0, .345, -21.5,
    1100.0, 0.235, 321.3, 53.40, 0.3, 10.0, 42.0, 32.52,
    32.6,   123.3, 1.234, 2.156, 0.1, 3.25, 4.75, 32.44,
    12.16,  52.34, 64.12, 71.13, -.1, 2.30, 5.12, 3.785,
    541.3,  321.4, 231.4, 531.4, 71., 321., 231., -531.,
    23.45,  23.45, 23.45, 23.45, 23.45, 23.45, 23.45, 23.45,
    23.45,  -1.43, -6.74, 6.345, -20.1, -20.1, -40.1, -40.1,
    1.234,  2.345, 3.456, 4.567, 5.678, 6.789, 7.891, 8.912,
    -9.32,  -8.41, -7.50, -6.59, -5.68, -4.77, -3.86, -2.95,
    9.32,   8.41,  7.50,  6.59,  -5.68, -4.77, -3.86, -2.95
  };

/* Drive both variants over the value table and abort on any mismatch
 * against the scalar reference { p1[0]-p2[0], p1[1]+p2[1] }. */
static void
TEST (void)
{
  int i;
  int fail = 0;

  for (i = 0; i < 80; i += 4)
    {
      p1[0] = vals[i+0];
      p1[1] = vals[i+1];
      p2[0] = vals[i+2];
      p2[1] = vals[i+3];

      ck[0] = p1[0] - p2[0];
      ck[1] = p1[1] + p2[1];

      sse3_test_addsubpd (p1, p2, p3);
      fail += chk_pd (ck, p3);

      sse3_test_addsubpd_subsume (p1, p2, p3);
      fail += chk_pd (ck, p3);
    }

  if (fail != 0)
    abort ();
}
gpl-2.0
holke/p4est
example/timings/bricks2.c
6
4643
/*
  This file is part of p4est.
  p4est is a C library to manage a collection (a forest) of multiple
  connected adaptive quadtrees or octrees in parallel.

  Copyright (C) 2010 The University of Texas System
  Additional copyright (C) 2011 individual authors
  Written by Carsten Burstedde, Lucas C. Wilcox, and Tobin Isaac

  p4est is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  p4est is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with p4est; if not, write to the Free Software Foundation, Inc.,
  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

/* This file compiles for both 2D (p4est) and 3D (p8est) depending on
 * whether P4_TO_P8 is defined by the build. */
#ifndef P4_TO_P8
#include <p4est_bits.h>
#include <p4est_extended.h>
#include <p4est_vtk.h>
#else
#include <p8est_bits.h>
#include <p8est_extended.h>
#include <p8est_vtk.h>
#endif
#include <sc_options.h>

/* #define BRICKS_VTK */

/* Refinement parameters shared with the refine callback; set per run in
 * run_bricks before p4est_refine is called. */
static int          refine_level;
static int          level_shift;

/* Refinement callback producing a fractal pattern: refine everything up
 * to (refine_level - level_shift), then only quadrants whose child id is
 * in a fixed subset, up to refine_level. */
static int
refine_fractal (p4est_t * p4est, p4est_topidx_t which_tree,
                p4est_quadrant_t * q)
{
  int                 qid;

  if ((int) q->level >= refine_level) {
    return 0;
  }
  if ((int) q->level < refine_level - level_shift) {
    return 1;
  }

  /* at level 0 the child id is derived from the tree number, since a
   * level-0 quadrant has no parent within its tree */
  qid = ((int) q->level == 0 ? (which_tree % P4EST_CHILDREN) :
         p4est_quadrant_child_id (q));
  return (qid == 0 || qid == 3
#ifdef P4_TO_P8
          || qid == 5 || qid == 6
#endif
    );
}

/* Build a 2^l x 2^l (x 2^l) brick connectivity, refine it fractally to an
 * effective level of rlevel, then partition and 2:1-balance the forest,
 * timing each of the three phases across all ranks. */
static void
run_bricks (sc_MPI_Comm mpicomm, int per, int l, int rlevel)
{
  int                 mpiret;
  int                 tcount;
  double              elapsed_create, elapsed_partition, elapsed_balance;
#ifdef BRICKS_VTK
  char                filename[BUFSIZ];
#endif
  p4est_connectivity_t *conn;
  p4est_t            *p4est;

  P4EST_GLOBAL_PRODUCTIONF ("Run bricks on level %d/%d\n", l, rlevel);
  P4EST_ASSERT (l <= rlevel);

  /* create and refine the forest */
  mpiret = sc_MPI_Barrier (mpicomm);
  SC_CHECK_MPI (mpiret);
  elapsed_create = -sc_MPI_Wtime ();

  tcount = 1 << l;
#ifndef P4_TO_P8
  conn = p4est_connectivity_new_brick (tcount, tcount, per, per);
#else
  conn = p8est_connectivity_new_brick (tcount, tcount, tcount,
                                       per, per, per);
#endif
  /* initial uniform refinement of rlevel - l keeps the total effective
   * resolution constant across the different brick sizes l */
  p4est = p4est_new_ext (mpicomm, conn, 0, rlevel - l, 1, 0, NULL, NULL);
  level_shift = 4;
  refine_level = rlevel - l + level_shift;
  p4est_refine (p4est, 1, refine_fractal, NULL);
  elapsed_create += sc_MPI_Wtime ();

  /* partition the forest */
  mpiret = sc_MPI_Barrier (mpicomm);
  SC_CHECK_MPI (mpiret);
  elapsed_partition = -sc_MPI_Wtime ();
  p4est_partition (p4est, 0, NULL);
  elapsed_partition += sc_MPI_Wtime ();

  /* balance the forest */
  mpiret = sc_MPI_Barrier (mpicomm);
  SC_CHECK_MPI (mpiret);
  elapsed_balance = -sc_MPI_Wtime ();
  p4est_balance (p4est, P4EST_CONNECT_FULL, NULL);
  elapsed_balance += sc_MPI_Wtime ();

  /* postprocessing */
  P4EST_GLOBAL_PRODUCTIONF ("Timings %g %g %g\n", elapsed_create,
                            elapsed_partition, elapsed_balance);

#ifdef BRICKS_VTK
  snprintf (filename, BUFSIZ, "brick_%02d_%02d_B", rlevel, l);
  p4est_vtk_write_file (p4est, NULL, filename);
#endif

  p4est_destroy (p4est);
  p4est_connectivity_destroy (conn);
}

/* Parse -l/--level and -p/--periodic, then time brick runs for every
 * brick size l from 0 up to the requested level. */
int
main (int argc, char **argv)
{
  sc_MPI_Comm         mpicomm;
  int                 mpiret, retval;
  int                 rlevel, l;
  int                 periodic;
  sc_options_t       *opt;

  mpiret = sc_MPI_Init (&argc, &argv);
  SC_CHECK_MPI (mpiret);
  mpicomm = sc_MPI_COMM_WORLD;
  sc_init (sc_MPI_COMM_WORLD, 1, 1, NULL, SC_LP_DEFAULT);
  p4est_init (NULL, SC_LP_DEFAULT);

  opt = sc_options_new (argv[0]);
  sc_options_add_int (opt, 'l', "level", &rlevel, 0,
                      "Upfront refinement level");
  sc_options_add_switch (opt, 'p', "periodic", &periodic,
                         "Periodic connectivity");
  retval = sc_options_parse (p4est_package_id, SC_LP_ERROR, opt, argc, argv);
  if (retval == -1 || retval < argc) {
    sc_options_print_usage (p4est_package_id, SC_LP_PRODUCTION, opt, NULL);
    sc_abort_collective ("Usage error");
  }

  for (l = 0; l <= rlevel; ++l) {
    run_bricks (mpicomm, periodic, l, rlevel);
  }

  sc_options_destroy (opt);
  sc_finalize ();

  mpiret = sc_MPI_Finalize ();
  SC_CHECK_MPI (mpiret);

  return 0;
}
gpl-2.0
heshamelmatary/rtems-gsoc2013
c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_categories.c
6
10886
/**
 * @file
 *
 * @ingroup ppc_exc
 *
 * @brief PowerPC Exceptions implementation.
 */

/*
 * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
 *                    Canon Centre Recherche France.
 *
 * Copyright (C) 2009-2011 embedded brains GmbH.
 *
 * Enhanced by Jay Kulpinski <jskulpin@eng01.gdds.com>
 * to support 603, 603e, 604, 604e exceptions
 *
 * Moved to "libcpu/powerpc/new-exceptions" and consolidated
 * by Thomas Doerfler <Thomas.Doerfler@embedded-brains.de>
 * to be common for all PPCs with new exceptions.
 *
 * Derived from file "libcpu/powerpc/new-exceptions/raw_exception.c".
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 */

#include <bsp/vectors.h>

/* Vector categories common to all classic (60x-style) PowerPC cores,
 * without the system call vector. */
#define PPC_BASIC_VECS_WO_SYS \
  [ASM_RESET_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_MACH_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC, \
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC, \
  [ASM_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC, \
  [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC

/* Basic vector categories including the system call vector. */
#define PPC_BASIC_VECS \
  PPC_BASIC_VECS_WO_SYS, \
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC

/* PPC 405: Book E-style vectors with 405-specific critical class. */
static const ppc_exc_categories ppc_405_category_table = {
  [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_405_CRITICAL | PPC_EXC_ASYNC,
  [ASM_MACH_VECTOR] = PPC_EXC_405_CRITICAL,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PPC405_APU_UNAVAIL_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_405_CRITICAL | PPC_EXC_ASYNC,
  [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_TRACE_VECTOR] = PPC_EXC_405_CRITICAL,
};

/* Generic Book E cores (e.g. PPC 440). */
static const ppc_exc_categories ppc_booke_category_table = {
  [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_MACH_VECTOR] = PPC_EXC_E500_MACHCHK,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_APU_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEBUG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
};

/* MPC 5xx family. */
static const ppc_exc_categories mpc_5xx_category_table = {
  [ASM_RESET_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_MACH_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_FLOATASSIST_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_SOFTEMUL_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_IPROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_DPROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_DBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_IBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_MEBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_5XX_NMEBREAK_VECTOR] = PPC_EXC_CLASSIC,
};

/* MPC 603 family (software TLB miss vectors). */
static const ppc_exc_categories mpc_603_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_IMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
};

/* MPC 604 family. */
static const ppc_exc_categories mpc_604_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
};

/* MPC 604 family with AltiVec vectors (used for 7455/7457). */
static const ppc_exc_categories mpc_604_altivec_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_ASSIST_VECTOR] = PPC_EXC_CLASSIC,
};

/* MPC 750 family. */
static const ppc_exc_categories mpc_750_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_ITM_VECTOR] = PPC_EXC_CLASSIC,
};

/* MPC 750 family with AltiVec vectors (used for 7400). */
static const ppc_exc_categories mpc_750_altivec_category_table = {
  PPC_BASIC_VECS,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_ITM_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_ASSIST_VECTOR] = PPC_EXC_CLASSIC,
};

/* MPC 860 family. */
static const ppc_exc_categories mpc_860_category_table = {
  PPC_BASIC_VECS,
  [ASM_8XX_FLOATASSIST_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_SOFTEMUL_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_ITLBERROR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_DTLBERROR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_DBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_IBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_PERIFBREAK_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_8XX_DEVPORT_VECTOR] = PPC_EXC_CLASSIC,
};

/* e300 core: note the naked floating point vector and the critical
 * interrupt class. */
static const ppc_exc_categories e300_category_table = {
  [ASM_RESET_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_MACH_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_NAKED,
  [ASM_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_TRACE_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_E300_PERFMON_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_IMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E300_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
};

/* e200 cores (z0 through z7). */
static const ppc_exc_categories e200_category_table = {
  [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_MACH_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  /* FIXME: Depending on HDI0 [DAPUEN] this is a critical or debug exception */
  [ASM_BOOKE_DEBUG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
  [ASM_E500_SPE_UNAVAILABLE_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_EMB_FP_DATA_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_EMB_FP_ROUND_VECTOR] = PPC_EXC_CLASSIC
};

/* e500 cores (also used for the 8540). */
static const ppc_exc_categories e500_category_table = {
  [ASM_BOOKE_CRIT_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_MACH_VECTOR] = PPC_EXC_E500_MACHCHK,
  [ASM_PROT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_ISI_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_EXT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_ALIGN_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_PROG_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_FLOAT_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_SYS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_APU_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEC_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_FIT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_BOOKE_WDOG_VECTOR] = PPC_EXC_BOOKE_CRITICAL | PPC_EXC_ASYNC,
  [ASM_BOOKE_DTLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_ITLBMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_BOOKE_DEBUG_VECTOR] = PPC_EXC_BOOKE_CRITICAL,
  [ASM_E500_SPE_UNAVAILABLE_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_EMB_FP_DATA_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_EMB_FP_ROUND_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_E500_PERFMON_VECTOR] = PPC_EXC_CLASSIC
};

/* PSIM simulator: 603-like with AltiVec vectors, no system call entry. */
static const ppc_exc_categories psim_category_table = {
  PPC_BASIC_VECS_WO_SYS,
  [ASM_60X_SYSMGMT_VECTOR] = PPC_EXC_CLASSIC | PPC_EXC_ASYNC,
  [ASM_60X_IMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_DLMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_DSMISS_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_ADDR_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_VECTOR] = PPC_EXC_CLASSIC,
  [ASM_60X_VEC_ASSIST_VECTOR] = PPC_EXC_CLASSIC,
};

/**
 * @brief Returns the exception category table for the CPU @a cpu,
 *        or NULL if the CPU is unknown.
 *
 * AltiVec-capable variants are checked first so they get the table that
 * includes the vector unit exceptions.
 */
const ppc_exc_categories *ppc_exc_categories_for_cpu(ppc_cpu_id_t cpu)
{
  if (ppc_cpu_has_altivec()) {
    switch (cpu) {
      case PPC_7400:
        return &mpc_750_altivec_category_table;
      case PPC_7455:
      case PPC_7457:
        return &mpc_604_altivec_category_table;
      default:
        break;
    }
  }

  switch (cpu) {
    case PPC_7400:
    case PPC_750:
      return &mpc_750_category_table;
    case PPC_7455:
    case PPC_7457:
    case PPC_604:
    case PPC_604e:
    case PPC_604r:
      return &mpc_604_category_table;
    case PPC_603:
    case PPC_603e:
    case PPC_603le:
    case PPC_603ev:
    /* case PPC_8240: same value as 8260 */
    case PPC_8260:
    case PPC_8245:
      return &mpc_603_category_table;
    case PPC_e300c1:
    case PPC_e300c2:
    case PPC_e300c3:
      return &e300_category_table;
    case PPC_PSIM:
      return &psim_category_table;
    case PPC_8540:
    case PPC_e500v2:
      return &e500_category_table;
    case PPC_e200z0:
    case PPC_e200z1:
    case PPC_e200z4:
    case PPC_e200z6:
    case PPC_e200z7:
      return &e200_category_table;
    case PPC_5XX:
      return &mpc_5xx_category_table;
    case PPC_860:
      return &mpc_860_category_table;
    case PPC_405:
    case PPC_405GP:
    case PPC_405EX:
      return &ppc_405_category_table;
    case PPC_440:
      return &ppc_booke_category_table;
    default:
      break;
  }

  return NULL;
}

/**
 * @brief Returns the category of vector @a vector in table @a categories,
 *        or PPC_EXC_INVALID for an out-of-range vector number.
 */
ppc_exc_category ppc_exc_category_for_vector(
  const ppc_exc_categories *categories,
  unsigned vector
)
{
  if (vector <= LAST_VALID_EXC) {
    return (*categories) [vector];
  } else {
    return PPC_EXC_INVALID;
  }
}
gpl-2.0
sancome/linux-3.x
arch/um/drivers/net_kern.c
262
20495
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 * Licensed under the GPL.
 */

#include <linux/bootmem.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "init.h"
#include "irq_kern.h"
#include "irq_user.h"
#include "mconsole_kern.h"
#include "net_kern.h"
#include "net_user.h"

#define DRIVER_NAME "uml-netdev"

/* List of interfaces that are currently open, guarded by opened_lock. */
static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);

/*
 * The drop_skb is used when we can't allocate an skb.  The
 * packet is read into drop_skb in order to get the data off the
 * connection to the host.
 * It is reallocated whenever a maximum packet size is seen which is
 * larger than any seen before.  update_drop_skb is called from
 * eth_configure when a new interface is added.
 */
static DEFINE_SPINLOCK(drop_lock);
static struct sk_buff *drop_skb;
static int drop_max;

/*
 * Grow the shared drop skb to at least max bytes.  No-op if it is
 * already large enough.  Returns 0 on success, -ENOMEM on allocation
 * failure (the old drop_skb is kept in that case).
 */
static int update_drop_skb(int max)
{
	struct sk_buff *new;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&drop_lock, flags);

	if (max <= drop_max)
		goto out;

	err = -ENOMEM;
	new = dev_alloc_skb(max);
	if (new == NULL)
		goto out;

	skb_put(new, max);

	kfree_skb(drop_skb);
	drop_skb = new;
	drop_max = max;
	err = 0;
out:
	spin_unlock_irqrestore(&drop_lock, flags);

	return err;
}

/*
 * Read one packet from the host side of the interface and feed it to
 * the network stack.  Returns the packet length, 0 if the packet was
 * dropped for lack of memory, or a negative error from the read hook.
 */
static int uml_net_rx(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int pkt_len;
	struct sk_buff *skb;

	/* If we can't allocate memory, try again next round. */
	skb = dev_alloc_skb(lp->max_packet);
	if (skb == NULL) {
		drop_skb->dev = dev;
		/* Read a packet into drop_skb and don't do anything with it. */
		(*lp->read)(lp->fd, drop_skb, lp);
		dev->stats.rx_dropped++;
		return 0;
	}

	skb->dev = dev;
	skb_put(skb, lp->max_packet);
	skb_reset_mac_header(skb);
	pkt_len = (*lp->read)(lp->fd, skb, lp);

	if (pkt_len > 0) {
		skb_trim(skb, pkt_len);
		skb->protocol = (*lp->protocol)(skb);

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;
		netif_rx(skb);
		return pkt_len;
	}

	kfree_skb(skb);
	return pkt_len;
}

/* Deferred dev_close; scheduled from interrupt context where dev_close
 * cannot be called directly. */
static void uml_dev_close(struct work_struct *work)
{
	struct uml_net_private *lp =
		container_of(work, struct uml_net_private, work);
	dev_close(lp->dev);
}

/*
 * Receive interrupt handler: drain all pending packets; on a read error
 * schedule a deferred device shutdown (see comment below).
 */
static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return IRQ_NONE;

	spin_lock(&lp->lock);
	while ((err = uml_net_rx(dev)) > 0) ;
	if (err < 0) {
		printk(KERN_ERR
		       "Device '%s' read returned %d, shutting it down\n",
		       dev->name, err);
		/* dev_close can't be called in interrupt context, and takes
		 * again lp->lock.
		 * And dev_close() can be safely called multiple times on the
		 * same device, since it tests for (dev->flags & IFF_UP). So
		 * there's no harm in delaying the device shutdown.
		 * Furthermore, the workqueue will not re-enqueue an already
		 * enqueued work item. */
		schedule_work(&lp->work);
		goto out;
	}
	reactivate_fd(lp->fd, UM_ETH_IRQ);

out:
	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}

/*
 * ndo_open: open the host-side transport, request the receive IRQ and
 * drain any packets already queued on the host side.
 */
static int uml_net_open(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	int err;

	if (lp->fd >= 0) {
		err = -ENXIO;
		goto out;
	}

	lp->fd = (*lp->open)(&lp->user);
	if (lp->fd < 0) {
		err = lp->fd;
		goto out;
	}

	err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
			     IRQF_DISABLED | IRQF_SHARED, dev->name, dev);
	if (err != 0) {
		printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
		err = -ENETUNREACH;
		goto out_close;
	}

	lp->tl.data = (unsigned long) &lp->user;
	netif_start_queue(dev);

	/* clear buffer - it can happen that the host side of the interface
	 * is full when we get here. In this case, new data is never queued,
	 * SIGIOs never arrive, and the net never works.
	 */
	while ((err = uml_net_rx(dev)) > 0) ;

	spin_lock(&opened_lock);
	list_add(&lp->list, &opened);
	spin_unlock(&opened_lock);

	return 0;
out_close:
	if (lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;
out:
	return err;
}

/* ndo_stop: stop the queue, release the IRQ and the host transport. */
static int uml_net_close(struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	free_irq(dev->irq, dev);
	if (lp->close != NULL)
		(*lp->close)(lp->fd, &lp->user);
	lp->fd = -1;

	spin_lock(&opened_lock);
	list_del(&lp->list);
	spin_unlock(&opened_lock);

	return 0;
}

/*
 * ndo_start_xmit: hand the skb to the host-side write hook.  A short
 * write (len != skb->len and len != 0) is reported but the skb is
 * consumed in all cases.
 */
static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct uml_net_private *lp = netdev_priv(dev);
	unsigned long flags;
	int len;

	netif_stop_queue(dev);

	spin_lock_irqsave(&lp->lock, flags);

	len = (*lp->write)(lp->fd, skb, lp);

	if (len == skb->len) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev->trans_start = jiffies;
		netif_start_queue(dev);

		/* this is normally done in the interrupt when tx finishes */
		netif_wake_queue(dev);
	}
	else if (len == 0) {
		netif_start_queue(dev);
		dev->stats.tx_dropped++;
	}
	else {
		netif_start_queue(dev);
		printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
	}

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* ndo_set_rx_mode: multicast filtering is not supported; nothing to do. */
static void uml_net_set_multicast_list(struct net_device *dev)
{
	return;
}

/* ndo_tx_timeout: just restart the queue. */
static void uml_net_tx_timeout(struct net_device *dev)
{
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

/* ndo_change_mtu: any MTU is accepted as-is. */
static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
{
	dev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the receive handler with the IRQ masked. */
static void uml_net_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	uml_net_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void uml_net_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRIVER_NAME);
	strcpy(info->version, "42");
}

static const struct ethtool_ops uml_net_ethtool_ops = {
	.get_drvinfo	= uml_net_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Timer callback stub; the body is compiled out (#ifdef undef). */
static void uml_net_user_timer_expire(unsigned long _conn)
{
#ifdef undef
	struct connection *conn = (struct connection *)_conn;

	dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
	do_connect(conn);
#endif
}

/*
 * Parse str as a colon/comma separated MAC address into addr, falling
 * back to a random locally-administered address on parse failure or if
 * the parsed address is multicast/invalid.  A globally valid address is
 * accepted with a warning.
 */
static void setup_etheraddr(char *str, unsigned char *addr, char *name)
{
	char *end;
	int i;

	if (str == NULL)
		goto random;

	for (i = 0; i < 6; i++) {
		addr[i] = simple_strtoul(str, &end, 16);
		if ((end == str) ||
		   ((*end != ':') && (*end != ',') && (*end != '\0'))) {
			printk(KERN_ERR
			       "setup_etheraddr: failed to parse '%s' "
			       "as an ethernet address\n", str);
			goto random;
		}
		str = end + 1;
	}
	if (is_multicast_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign a multicast ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_valid_ether_addr(addr)) {
		printk(KERN_ERR
		       "Attempt to assign an invalid ethernet address to a "
		       "device disallowed\n");
		goto random;
	}
	if (!is_local_ether_addr(addr)) {
		printk(KERN_WARNING
		       "Warning: Assigning a globally valid ethernet "
		       "address to a device\n");
		printk(KERN_WARNING "You should set the 2nd rightmost bit in "
		       "the first byte of the MAC,\n");
		printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
		       addr[5]);
	}
	return;

random:
	printk(KERN_INFO
	       "Choosing a random ethernet address for device %s\n", name);
	random_ether_addr(addr);
}

/* List of configured devices, guarded by devices_lock. */
static DEFINE_SPINLOCK(devices_lock);
static LIST_HEAD(devices);

static struct platform_driver uml_net_driver = {
	.driver = {
		.name  = DRIVER_NAME,
	},
};

/* Platform-device release hook: tear down the host transport and free
 * both the bookkeeping struct and the net_device. */
static void net_device_release(struct device *dev)
{
	struct uml_net *device = dev_get_drvdata(dev);
	struct net_device *netdev = device->dev;
	struct uml_net_private *lp = netdev_priv(netdev);

	if (lp->remove != NULL)
		(*lp->remove)(&lp->user);
	list_del(&device->list);
	kfree(device);
	free_netdev(netdev);
}

static const struct net_device_ops uml_netdev_ops = {
	.ndo_open 		= uml_net_open,
	.ndo_stop 		= uml_net_close,
	.ndo_start_xmit 	= uml_net_start_xmit,
	.ndo_set_rx_mode	= uml_net_set_multicast_list,
	.ndo_tx_timeout 	= uml_net_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu 	= uml_net_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = uml_net_poll_controller,
#endif
};

/*
 * Ensures that platform_driver_register is called only once by
 * eth_configure. Will be set in an initcall.
*/ static int driver_registered; static void eth_configure(int n, void *init, char *mac, struct transport *transport) { struct uml_net *device; struct net_device *dev; struct uml_net_private *lp; int err, size; size = transport->private_size + sizeof(struct uml_net_private); device = kzalloc(sizeof(*device), GFP_KERNEL); if (device == NULL) { printk(KERN_ERR "eth_configure failed to allocate struct " "uml_net\n"); return; } dev = alloc_etherdev(size); if (dev == NULL) { printk(KERN_ERR "eth_configure: failed to allocate struct " "net_device for eth%d\n", n); goto out_free_device; } INIT_LIST_HEAD(&device->list); device->index = n; /* If this name ends up conflicting with an existing registered * netdevice, that is OK, register_netdev{,ice}() will notice this * and fail. */ snprintf(dev->name, sizeof(dev->name), "eth%d", n); setup_etheraddr(mac, device->mac, dev->name); printk(KERN_INFO "Netdevice %d (%pM) : ", n, device->mac); lp = netdev_priv(dev); /* This points to the transport private data. It's still clear, but we * must memset it to 0 *now*. Let's help the drivers. */ memset(lp, 0, size); INIT_WORK(&lp->work, uml_dev_close); /* sysfs register */ if (!driver_registered) { platform_driver_register(&uml_net_driver); driver_registered = 1; } device->pdev.id = n; device->pdev.name = DRIVER_NAME; device->pdev.dev.release = net_device_release; dev_set_drvdata(&device->pdev.dev, device); if (platform_device_register(&device->pdev)) goto out_free_netdev; SET_NETDEV_DEV(dev,&device->pdev.dev); device->dev = dev; /* * These just fill in a data structure, so there's no failure * to be worried about. 
*/ (*transport->kern->init)(dev, init); *lp = ((struct uml_net_private) { .list = LIST_HEAD_INIT(lp->list), .dev = dev, .fd = -1, .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0}, .max_packet = transport->user->max_packet, .protocol = transport->kern->protocol, .open = transport->user->open, .close = transport->user->close, .remove = transport->user->remove, .read = transport->kern->read, .write = transport->kern->write, .add_address = transport->user->add_address, .delete_address = transport->user->delete_address }); init_timer(&lp->tl); spin_lock_init(&lp->lock); lp->tl.function = uml_net_user_timer_expire; memcpy(lp->mac, device->mac, sizeof(lp->mac)); if ((transport->user->init != NULL) && ((*transport->user->init)(&lp->user, dev) != 0)) goto out_unregister; /* don't use eth_mac_addr, it will not work here */ memcpy(dev->dev_addr, device->mac, ETH_ALEN); dev->mtu = transport->user->mtu; dev->netdev_ops = &uml_netdev_ops; dev->ethtool_ops = &uml_net_ethtool_ops; dev->watchdog_timeo = (HZ >> 1); dev->irq = UM_ETH_IRQ; err = update_drop_skb(lp->max_packet); if (err) goto out_undo_user_init; rtnl_lock(); err = register_netdevice(dev); rtnl_unlock(); if (err) goto out_undo_user_init; spin_lock(&devices_lock); list_add(&device->list, &devices); spin_unlock(&devices_lock); return; out_undo_user_init: if (transport->user->remove != NULL) (*transport->user->remove)(&lp->user); out_unregister: platform_device_unregister(&device->pdev); return; /* platform_device_unregister frees dev and device */ out_free_netdev: free_netdev(dev); out_free_device: kfree(device); } static struct uml_net *find_device(int n) { struct uml_net *device; struct list_head *ele; spin_lock(&devices_lock); list_for_each(ele, &devices) { device = list_entry(ele, struct uml_net, list); if (device->index == n) goto out; } device = NULL; out: spin_unlock(&devices_lock); return device; } static int eth_parse(char *str, int *index_out, char **str_out, char **error_out) { char *end; int n, err = -EINVAL; n = 
simple_strtoul(str, &end, 0); if (end == str) { *error_out = "Bad device number"; return err; } str = end; if (*str != '=') { *error_out = "Expected '=' after device number"; return err; } str++; if (find_device(n)) { *error_out = "Device already configured"; return err; } *index_out = n; *str_out = str; return 0; } struct eth_init { struct list_head list; char *init; int index; }; static DEFINE_SPINLOCK(transports_lock); static LIST_HEAD(transports); /* Filled in during early boot */ static LIST_HEAD(eth_cmd_line); static int check_transport(struct transport *transport, char *eth, int n, void **init_out, char **mac_out) { int len; len = strlen(transport->name); if (strncmp(eth, transport->name, len)) return 0; eth += len; if (*eth == ',') eth++; else if (*eth != '\0') return 0; *init_out = kmalloc(transport->setup_size, GFP_KERNEL); if (*init_out == NULL) return 1; if (!transport->setup(eth, mac_out, *init_out)) { kfree(*init_out); *init_out = NULL; } return 1; } void register_transport(struct transport *new) { struct list_head *ele, *next; struct eth_init *eth; void *init; char *mac = NULL; int match; spin_lock(&transports_lock); BUG_ON(!list_empty(&new->list)); list_add(&new->list, &transports); spin_unlock(&transports_lock); list_for_each_safe(ele, next, &eth_cmd_line) { eth = list_entry(ele, struct eth_init, list); match = check_transport(new, eth->init, eth->index, &init, &mac); if (!match) continue; else if (init != NULL) { eth_configure(eth->index, init, mac, new); kfree(init); } list_del(&eth->list); } } static int eth_setup_common(char *str, int index) { struct list_head *ele; struct transport *transport; void *init; char *mac = NULL; int found = 0; spin_lock(&transports_lock); list_for_each(ele, &transports) { transport = list_entry(ele, struct transport, list); if (!check_transport(transport, str, index, &init, &mac)) continue; if (init != NULL) { eth_configure(index, init, mac, transport); kfree(init); } found = 1; break; } 
spin_unlock(&transports_lock); return found; } static int __init eth_setup(char *str) { struct eth_init *new; char *error; int n, err; err = eth_parse(str, &n, &str, &error); if (err) { printk(KERN_ERR "eth_setup - Couldn't parse '%s' : %s\n", str, error); return 1; } new = alloc_bootmem(sizeof(*new)); if (new == NULL) { printk(KERN_ERR "eth_init : alloc_bootmem failed\n"); return 1; } INIT_LIST_HEAD(&new->list); new->index = n; new->init = str; list_add_tail(&new->list, &eth_cmd_line); return 1; } __setup("eth", eth_setup); __uml_help(eth_setup, "eth[0-9]+=<transport>,<options>\n" " Configure a network device.\n\n" ); static int net_config(char *str, char **error_out) { int n, err; err = eth_parse(str, &n, &str, error_out); if (err) return err; /* This string is broken up and the pieces used by the underlying * driver. So, it is freed only if eth_setup_common fails. */ str = kstrdup(str, GFP_KERNEL); if (str == NULL) { *error_out = "net_config failed to strdup string"; return -ENOMEM; } err = !eth_setup_common(str, n); if (err) kfree(str); return err; } static int net_id(char **str, int *start_out, int *end_out) { char *end; int n; n = simple_strtoul(*str, &end, 0); if ((*end != '\0') || (end == *str)) return -1; *start_out = n; *end_out = n; *str = end; return n; } static int net_remove(int n, char **error_out) { struct uml_net *device; struct net_device *dev; struct uml_net_private *lp; device = find_device(n); if (device == NULL) return -ENODEV; dev = device->dev; lp = netdev_priv(dev); if (lp->fd > 0) return -EBUSY; unregister_netdev(dev); platform_device_unregister(&device->pdev); return 0; } static struct mc_device net_mc = { .list = LIST_HEAD_INIT(net_mc.list), .name = "eth", .config = net_config, .get_config = NULL, .id = net_id, .remove = net_remove, }; #ifdef CONFIG_INET static int uml_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr) { struct in_ifaddr *ifa = ptr; struct net_device *dev = ifa->ifa_dev->dev; struct 
uml_net_private *lp; void (*proc)(unsigned char *, unsigned char *, void *); unsigned char addr_buf[4], netmask_buf[4]; if (dev->netdev_ops->ndo_open != uml_net_open) return NOTIFY_DONE; lp = netdev_priv(dev); proc = NULL; switch (event) { case NETDEV_UP: proc = lp->add_address; break; case NETDEV_DOWN: proc = lp->delete_address; break; } if (proc != NULL) { memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf)); memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf)); (*proc)(addr_buf, netmask_buf, &lp->user); } return NOTIFY_DONE; } /* uml_net_init shouldn't be called twice on two CPUs at the same time */ static struct notifier_block uml_inetaddr_notifier = { .notifier_call = uml_inetaddr_event, }; static void inet_register(void) { struct list_head *ele; struct uml_net_private *lp; struct in_device *ip; struct in_ifaddr *in; register_inetaddr_notifier(&uml_inetaddr_notifier); /* Devices may have been opened already, so the uml_inetaddr_notifier * didn't get a chance to run for them. This fakes it so that * addresses which have already been set up get handled properly. 
*/ spin_lock(&opened_lock); list_for_each(ele, &opened) { lp = list_entry(ele, struct uml_net_private, list); ip = lp->dev->ip_ptr; if (ip == NULL) continue; in = ip->ifa_list; while (in != NULL) { uml_inetaddr_event(NULL, NETDEV_UP, in); in = in->ifa_next; } } spin_unlock(&opened_lock); } #else static inline void inet_register(void) { } #endif static int uml_net_init(void) { mconsole_register_dev(&net_mc); inet_register(); return 0; } __initcall(uml_net_init); static void close_devices(void) { struct list_head *ele; struct uml_net_private *lp; spin_lock(&opened_lock); list_for_each(ele, &opened) { lp = list_entry(ele, struct uml_net_private, list); free_irq(lp->dev->irq, lp->dev); if ((lp->close != NULL) && (lp->fd >= 0)) (*lp->close)(lp->fd, &lp->user); if (lp->remove != NULL) (*lp->remove)(&lp->user); } spin_unlock(&opened_lock); } __uml_exitcall(close_devices); void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *, void *), void *arg) { struct net_device *dev = d; struct in_device *ip = dev->ip_ptr; struct in_ifaddr *in; unsigned char address[4], netmask[4]; if (ip == NULL) return; in = ip->ifa_list; while (in != NULL) { memcpy(address, &in->ifa_address, sizeof(address)); memcpy(netmask, &in->ifa_mask, sizeof(netmask)); (*cb)(address, netmask, arg); in = in->ifa_next; } } int dev_netmask(void *d, void *m) { struct net_device *dev = d; struct in_device *ip = dev->ip_ptr; struct in_ifaddr *in; __be32 *mask_out = m; if (ip == NULL) return 1; in = ip->ifa_list; if (in == NULL) return 1; *mask_out = in->ifa_mask; return 0; } void *get_output_buffer(int *len_out) { void *ret; ret = (void *) __get_free_pages(GFP_KERNEL, 0); if (ret) *len_out = PAGE_SIZE; else *len_out = 0; return ret; } void free_output_buffer(void *buffer) { free_pages((unsigned long) buffer, 0); } int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out, char **gate_addr) { char *remain; remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL); if (remain 
!= NULL) { printk(KERN_ERR "tap_setup_common - Extra garbage on " "specification : '%s'\n", remain); return 1; } return 0; } unsigned short eth_protocol(struct sk_buff *skb) { return eth_type_trans(skb, skb->dev); }
gpl-2.0
AOKP/lge-kernel-star
drivers/leds/leds-cobalt-raq.c
518
3315
/* * LEDs driver for the Cobalt Raq series. * * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/types.h> #define LED_WEB 0x04 #define LED_POWER_OFF 0x08 static void __iomem *led_port; static u8 led_value; static DEFINE_SPINLOCK(led_value_lock); static void raq_web_led_set(struct led_classdev *led_cdev, enum led_brightness brightness) { unsigned long flags; spin_lock_irqsave(&led_value_lock, flags); if (brightness) led_value |= LED_WEB; else led_value &= ~LED_WEB; writeb(led_value, led_port); spin_unlock_irqrestore(&led_value_lock, flags); } static struct led_classdev raq_web_led = { .name = "raq::web", .brightness_set = raq_web_led_set, }; static void raq_power_off_led_set(struct led_classdev *led_cdev, enum led_brightness brightness) { unsigned long flags; spin_lock_irqsave(&led_value_lock, flags); if (brightness) led_value |= LED_POWER_OFF; else led_value &= ~LED_POWER_OFF; writeb(led_value, led_port); spin_unlock_irqrestore(&led_value_lock, flags); } static struct led_classdev raq_power_off_led = { .name = "raq::power-off", .brightness_set = raq_power_off_led_set, .default_trigger = 
"power-off", }; static int __devinit cobalt_raq_led_probe(struct platform_device *pdev) { struct resource *res; int retval; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EBUSY; led_port = ioremap(res->start, res->end - res->start + 1); if (!led_port) return -ENOMEM; retval = led_classdev_register(&pdev->dev, &raq_power_off_led); if (retval) goto err_iounmap; retval = led_classdev_register(&pdev->dev, &raq_web_led); if (retval) goto err_unregister; return 0; err_unregister: led_classdev_unregister(&raq_power_off_led); err_iounmap: iounmap(led_port); led_port = NULL; return retval; } static int __devexit cobalt_raq_led_remove(struct platform_device *pdev) { led_classdev_unregister(&raq_power_off_led); led_classdev_unregister(&raq_web_led); if (led_port) { iounmap(led_port); led_port = NULL; } return 0; } static struct platform_driver cobalt_raq_led_driver = { .probe = cobalt_raq_led_probe, .remove = __devexit_p(cobalt_raq_led_remove), .driver = { .name = "cobalt-raq-leds", .owner = THIS_MODULE, }, }; static int __init cobalt_raq_led_init(void) { return platform_driver_register(&cobalt_raq_led_driver); } module_init(cobalt_raq_led_init);
gpl-2.0
ravikwow/jordan-kernel
arch/microblaze/kernel/sys_microblaze.c
518
2544
/* * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2007 John Williams <john.williams@petalogix.com> * * Copyright (C) 2006 Atmark Techno, Inc. * Yasushi SHOJI <yashi@atmark-techno.com> * Tetsuya OHKAWA <tetsuya@atmark-techno.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/errno.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/syscalls.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/sys.h> #include <linux/ipc.h> #include <linux/file.h> #include <linux/module.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include <linux/unistd.h> #include <asm/syscalls.h> asmlinkage long microblaze_vfork(struct pt_regs *regs) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->r1, regs, 0, NULL, NULL); } asmlinkage long microblaze_clone(int flags, unsigned long stack, struct pt_regs *regs) { if (!stack) stack = regs->r1; return do_fork(flags, stack, regs, 0, NULL, NULL); } asmlinkage long microblaze_execve(char __user *filenamei, char __user *__user *argv, char __user *__user *envp, struct pt_regs *regs) { int error; char *filename; filename = getname(filenamei); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = do_execve(filename, argv, envp, regs); putname(filename); out: return error; } asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, off_t pgoff) { if (pgoff & ~PAGE_MASK) return -EINVAL; return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); } /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. 
*/ int kernel_execve(const char *filename, char *const argv[], char *const envp[]) { register const char *__a __asm__("r5") = filename; register const void *__b __asm__("r6") = argv; register const void *__c __asm__("r7") = envp; register unsigned long __syscall __asm__("r12") = __NR_execve; register unsigned long __ret __asm__("r3"); __asm__ __volatile__ ("brki r14, 0x8" : "=r" (__ret), "=r" (__syscall) : "1" (__syscall), "r" (__a), "r" (__b), "r" (__c) : "r4", "r8", "r9", "r10", "r11", "r14", "cc", "memory"); return __ret; }
gpl-2.0
ultravioletnanokitty/android_kernel_zte_smarttab
arch/mips/alchemy/common/clocks.c
774
3530
/* * BRIEF MODULE DESCRIPTION * Simple Au1xx0 clocks routines. * * Copyright 2001, 2008 MontaVista Software Inc. * Author: MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/spinlock.h> #include <asm/time.h> #include <asm/mach-au1x00/au1000.h> /* * I haven't found anyone that doesn't use a 12 MHz source clock, * but just in case..... */ #define AU1000_SRC_CLK 12000000 static unsigned int au1x00_clock; /* Hz */ static unsigned long uart_baud_base; /* * Set the au1000_clock */ void set_au1x00_speed(unsigned int new_freq) { au1x00_clock = new_freq; } unsigned int get_au1x00_speed(void) { return au1x00_clock; } EXPORT_SYMBOL(get_au1x00_speed); /* * The UART baud base is not known at compile time ... if * we want to be able to use the same code on different * speed CPUs. 
*/ unsigned long get_au1x00_uart_baud_base(void) { return uart_baud_base; } void set_au1x00_uart_baud_base(unsigned long new_baud_base) { uart_baud_base = new_baud_base; } /* * We read the real processor speed from the PLL. This is important * because it is more accurate than computing it from the 32 KHz * counter, if it exists. If we don't have an accurate processor * speed, all of the peripherals that derive their clocks based on * this advertised speed will introduce error and sometimes not work * properly. This function is futher convoluted to still allow configurations * to do that in case they have really, really old silicon with a * write-only PLL register. -- Dan */ unsigned long au1xxx_calc_clock(void) { unsigned long cpu_speed; /* * On early Au1000, sys_cpupll was write-only. Since these * silicon versions of Au1000 are not sold by AMD, we don't bend * over backwards trying to determine the frequency. */ if (au1xxx_cpu_has_pll_wo()) #ifdef CONFIG_SOC_AU1000_FREQUENCY cpu_speed = CONFIG_SOC_AU1000_FREQUENCY; #else cpu_speed = 396000000; #endif else cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK; /* On Alchemy CPU:counter ratio is 1:1 */ mips_hpt_frequency = cpu_speed; /* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */ set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL) & 0x03) + 2) * 16)); set_au1x00_speed(cpu_speed); return cpu_speed; }
gpl-2.0
Team-Hydra/android_kernel_htc_msm8660-caf
drivers/gpio/pca953x.c
1542
17438
/* * pca953x.c - 4/8/16 bit I/O ports * * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com> * Copyright (C) 2007 Marvell International Ltd. * * Derived from drivers/i2c/chips/pca9539.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. */ #include <linux/module.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/i2c.h> #include <linux/i2c/pca953x.h> #include <linux/slab.h> #ifdef CONFIG_OF_GPIO #include <linux/of_platform.h> #include <linux/of_gpio.h> #endif #define PCA953X_INPUT 0 #define PCA953X_OUTPUT 1 #define PCA953X_INVERT 2 #define PCA953X_DIRECTION 3 #define PCA957X_IN 0 #define PCA957X_INVRT 1 #define PCA957X_BKEN 2 #define PCA957X_PUPD 3 #define PCA957X_CFG 4 #define PCA957X_OUT 5 #define PCA957X_MSK 6 #define PCA957X_INTS 7 #define PCA_GPIO_MASK 0x00FF #define PCA_INT 0x0100 #define PCA953X_TYPE 0x1000 #define PCA957X_TYPE 0x2000 static const struct i2c_device_id pca953x_id[] = { { "pca9534", 8 | PCA953X_TYPE | PCA_INT, }, { "pca9535", 16 | PCA953X_TYPE | PCA_INT, }, { "pca9536", 4 | PCA953X_TYPE, }, { "pca9537", 4 | PCA953X_TYPE | PCA_INT, }, { "pca9538", 8 | PCA953X_TYPE | PCA_INT, }, { "pca9539", 16 | PCA953X_TYPE | PCA_INT, }, { "pca9554", 8 | PCA953X_TYPE | PCA_INT, }, { "pca9555", 16 | PCA953X_TYPE | PCA_INT, }, { "pca9556", 8 | PCA953X_TYPE, }, { "pca9557", 8 | PCA953X_TYPE, }, { "pca9574", 8 | PCA957X_TYPE | PCA_INT, }, { "pca9575", 16 | PCA957X_TYPE | PCA_INT, }, { "max7310", 8 | PCA953X_TYPE, }, { "max7312", 16 | PCA953X_TYPE | PCA_INT, }, { "max7313", 16 | PCA953X_TYPE | PCA_INT, }, { "max7315", 8 | PCA953X_TYPE | PCA_INT, }, { "pca6107", 8 | PCA953X_TYPE | PCA_INT, }, { "tca6408", 8 | PCA953X_TYPE | PCA_INT, }, { "tca6416", 16 | PCA953X_TYPE | PCA_INT, }, /* NYET: { "tca6424", 24, }, */ { } }; MODULE_DEVICE_TABLE(i2c, 
pca953x_id); struct pca953x_chip { unsigned gpio_start; uint16_t reg_output; uint16_t reg_direction; struct mutex i2c_lock; #ifdef CONFIG_GPIO_PCA953X_IRQ struct mutex irq_lock; uint16_t irq_mask; uint16_t irq_stat; uint16_t irq_trig_raise; uint16_t irq_trig_fall; int irq_base; #endif struct i2c_client *client; struct pca953x_platform_data *dyn_pdata; struct gpio_chip gpio_chip; const char *const *names; int chip_type; }; static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val) { int ret = 0; if (chip->gpio_chip.ngpio <= 8) ret = i2c_smbus_write_byte_data(chip->client, reg, val); else { switch (chip->chip_type) { case PCA953X_TYPE: ret = i2c_smbus_write_word_data(chip->client, reg << 1, val); break; case PCA957X_TYPE: ret = i2c_smbus_write_byte_data(chip->client, reg << 1, val & 0xff); if (ret < 0) break; ret = i2c_smbus_write_byte_data(chip->client, (reg << 1) + 1, (val & 0xff00) >> 8); break; } } if (ret < 0) { dev_err(&chip->client->dev, "failed writing register\n"); return ret; } return 0; } static int pca953x_read_reg(struct pca953x_chip *chip, int reg, uint16_t *val) { int ret; if (chip->gpio_chip.ngpio <= 8) ret = i2c_smbus_read_byte_data(chip->client, reg); else ret = i2c_smbus_read_word_data(chip->client, reg << 1); if (ret < 0) { dev_err(&chip->client->dev, "failed reading register\n"); return ret; } *val = (uint16_t)ret; return 0; } static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off) { struct pca953x_chip *chip; uint16_t reg_val; int ret, offset = 0; chip = container_of(gc, struct pca953x_chip, gpio_chip); mutex_lock(&chip->i2c_lock); reg_val = chip->reg_direction | (1u << off); switch (chip->chip_type) { case PCA953X_TYPE: offset = PCA953X_DIRECTION; break; case PCA957X_TYPE: offset = PCA957X_CFG; break; } ret = pca953x_write_reg(chip, offset, reg_val); if (ret) goto exit; chip->reg_direction = reg_val; ret = 0; exit: mutex_unlock(&chip->i2c_lock); return ret; } static int 
pca953x_gpio_direction_output(struct gpio_chip *gc, unsigned off, int val) { struct pca953x_chip *chip; uint16_t reg_val; int ret, offset = 0; chip = container_of(gc, struct pca953x_chip, gpio_chip); mutex_lock(&chip->i2c_lock); /* set output level */ if (val) reg_val = chip->reg_output | (1u << off); else reg_val = chip->reg_output & ~(1u << off); switch (chip->chip_type) { case PCA953X_TYPE: offset = PCA953X_OUTPUT; break; case PCA957X_TYPE: offset = PCA957X_OUT; break; } ret = pca953x_write_reg(chip, offset, reg_val); if (ret) goto exit; chip->reg_output = reg_val; /* then direction */ reg_val = chip->reg_direction & ~(1u << off); switch (chip->chip_type) { case PCA953X_TYPE: offset = PCA953X_DIRECTION; break; case PCA957X_TYPE: offset = PCA957X_CFG; break; } ret = pca953x_write_reg(chip, offset, reg_val); if (ret) goto exit; chip->reg_direction = reg_val; ret = 0; exit: mutex_unlock(&chip->i2c_lock); return ret; } static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) { struct pca953x_chip *chip; uint16_t reg_val; int ret, offset = 0; chip = container_of(gc, struct pca953x_chip, gpio_chip); mutex_lock(&chip->i2c_lock); switch (chip->chip_type) { case PCA953X_TYPE: offset = PCA953X_INPUT; break; case PCA957X_TYPE: offset = PCA957X_IN; break; } ret = pca953x_read_reg(chip, offset, &reg_val); mutex_unlock(&chip->i2c_lock); if (ret < 0) { /* NOTE: diagnostic already emitted; that's all we should * do unless gpio_*_value_cansleep() calls become different * from their nonsleeping siblings (and report faults). */ return 0; } return (reg_val & (1u << off)) ? 
1 : 0; } static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) { struct pca953x_chip *chip; uint16_t reg_val; int ret, offset = 0; chip = container_of(gc, struct pca953x_chip, gpio_chip); mutex_lock(&chip->i2c_lock); if (val) reg_val = chip->reg_output | (1u << off); else reg_val = chip->reg_output & ~(1u << off); switch (chip->chip_type) { case PCA953X_TYPE: offset = PCA953X_OUTPUT; break; case PCA957X_TYPE: offset = PCA957X_OUT; break; } ret = pca953x_write_reg(chip, offset, reg_val); if (ret) goto exit; chip->reg_output = reg_val; exit: mutex_unlock(&chip->i2c_lock); } static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) { struct gpio_chip *gc; gc = &chip->gpio_chip; gc->direction_input = pca953x_gpio_direction_input; gc->direction_output = pca953x_gpio_direction_output; gc->get = pca953x_gpio_get_value; gc->set = pca953x_gpio_set_value; gc->can_sleep = 1; gc->base = chip->gpio_start; gc->ngpio = gpios; gc->label = chip->client->name; gc->dev = &chip->client->dev; gc->owner = THIS_MODULE; gc->names = chip->names; } #ifdef CONFIG_GPIO_PCA953X_IRQ static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off) { struct pca953x_chip *chip; chip = container_of(gc, struct pca953x_chip, gpio_chip); return chip->irq_base + off; } static void pca953x_irq_mask(struct irq_data *d) { struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); chip->irq_mask &= ~(1 << (d->irq - chip->irq_base)); } static void pca953x_irq_unmask(struct irq_data *d) { struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); chip->irq_mask |= 1 << (d->irq - chip->irq_base); } static void pca953x_irq_bus_lock(struct irq_data *d) { struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); mutex_lock(&chip->irq_lock); } static void pca953x_irq_bus_sync_unlock(struct irq_data *d) { struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); uint16_t new_irqs; uint16_t level; /* Look for any newly setup interrupt */ new_irqs = chip->irq_trig_fall | 
chip->irq_trig_raise; new_irqs &= ~chip->reg_direction; while (new_irqs) { level = __ffs(new_irqs); pca953x_gpio_direction_input(&chip->gpio_chip, level); new_irqs &= ~(1 << level); } mutex_unlock(&chip->irq_lock); } static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) { struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); uint16_t level = d->irq - chip->irq_base; uint16_t mask = 1 << level; if (!(type & IRQ_TYPE_EDGE_BOTH)) { dev_err(&chip->client->dev, "irq %d: unsupported type %d\n", d->irq, type); return -EINVAL; } if (type & IRQ_TYPE_EDGE_FALLING) chip->irq_trig_fall |= mask; else chip->irq_trig_fall &= ~mask; if (type & IRQ_TYPE_EDGE_RISING) chip->irq_trig_raise |= mask; else chip->irq_trig_raise &= ~mask; return 0; } static struct irq_chip pca953x_irq_chip = { .name = "pca953x", .irq_mask = pca953x_irq_mask, .irq_unmask = pca953x_irq_unmask, .irq_bus_lock = pca953x_irq_bus_lock, .irq_bus_sync_unlock = pca953x_irq_bus_sync_unlock, .irq_set_type = pca953x_irq_set_type, }; static uint16_t pca953x_irq_pending(struct pca953x_chip *chip) { uint16_t cur_stat; uint16_t old_stat; uint16_t pending; uint16_t trigger; int ret, offset = 0; switch (chip->chip_type) { case PCA953X_TYPE: offset = PCA953X_INPUT; break; case PCA957X_TYPE: offset = PCA957X_IN; break; } ret = pca953x_read_reg(chip, offset, &cur_stat); if (ret) return 0; /* Remove output pins from the equation */ cur_stat &= chip->reg_direction; old_stat = chip->irq_stat; trigger = (cur_stat ^ old_stat) & chip->irq_mask; if (!trigger) return 0; chip->irq_stat = cur_stat; pending = (old_stat & chip->irq_trig_fall) | (cur_stat & chip->irq_trig_raise); pending &= trigger; return pending; } static irqreturn_t pca953x_irq_handler(int irq, void *devid) { struct pca953x_chip *chip = devid; uint16_t pending; uint16_t level; pending = pca953x_irq_pending(chip); if (!pending) return IRQ_HANDLED; do { level = __ffs(pending); handle_nested_irq(level + chip->irq_base); pending &= ~(1 << level); } 
while (pending); return IRQ_HANDLED; } static int pca953x_irq_setup(struct pca953x_chip *chip, const struct i2c_device_id *id) { struct i2c_client *client = chip->client; struct pca953x_platform_data *pdata = client->dev.platform_data; int ret, offset = 0; if (pdata->irq_base != -1 && (id->driver_data & PCA_INT)) { int lvl; switch (chip->chip_type) { case PCA953X_TYPE: offset = PCA953X_INPUT; break; case PCA957X_TYPE: offset = PCA957X_IN; break; } ret = pca953x_read_reg(chip, offset, &chip->irq_stat); if (ret) goto out_failed; /* * There is no way to know which GPIO line generated the * interrupt. We have to rely on the previous read for * this purpose. */ chip->irq_stat &= chip->reg_direction; chip->irq_base = pdata->irq_base; mutex_init(&chip->irq_lock); for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) { int irq = lvl + chip->irq_base; irq_set_chip_data(irq, chip); irq_set_chip(irq, &pca953x_irq_chip); irq_set_nested_thread(irq, true); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else irq_set_noprobe(irq); #endif } ret = request_threaded_irq(client->irq, NULL, pca953x_irq_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, dev_name(&client->dev), chip); if (ret) { dev_err(&client->dev, "failed to request irq %d\n", client->irq); goto out_failed; } chip->gpio_chip.to_irq = pca953x_gpio_to_irq; } return 0; out_failed: chip->irq_base = -1; return ret; } static void pca953x_irq_teardown(struct pca953x_chip *chip) { if (chip->irq_base != -1) free_irq(chip->client->irq, chip); } #else /* CONFIG_GPIO_PCA953X_IRQ */ static int pca953x_irq_setup(struct pca953x_chip *chip, const struct i2c_device_id *id) { struct i2c_client *client = chip->client; struct pca953x_platform_data *pdata = client->dev.platform_data; if (pdata->irq_base != -1 && (id->driver_data & PCA_INT)) dev_warn(&client->dev, "interrupt support not compiled in\n"); return 0; } static void pca953x_irq_teardown(struct pca953x_chip *chip) { } #endif /* * Handlers for alternative sources 
of platform_data */ #ifdef CONFIG_OF_GPIO /* * Translate OpenFirmware node properties into platform_data */ static struct pca953x_platform_data * pca953x_get_alt_pdata(struct i2c_client *client) { struct pca953x_platform_data *pdata; struct device_node *node; const __be32 *val; int size; node = client->dev.of_node; if (node == NULL) return NULL; pdata = kzalloc(sizeof(struct pca953x_platform_data), GFP_KERNEL); if (pdata == NULL) { dev_err(&client->dev, "Unable to allocate platform_data\n"); return NULL; } pdata->gpio_base = -1; val = of_get_property(node, "linux,gpio-base", &size); if (val) { if (size != sizeof(*val)) dev_warn(&client->dev, "%s: wrong linux,gpio-base\n", node->full_name); else pdata->gpio_base = be32_to_cpup(val); } val = of_get_property(node, "polarity", NULL); if (val) pdata->invert = *val; return pdata; } #else static struct pca953x_platform_data * pca953x_get_alt_pdata(struct i2c_client *client) { return NULL; } #endif static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert) { int ret; ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output); if (ret) goto out; ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &chip->reg_direction); if (ret) goto out; /* set platform specific polarity inversion */ ret = pca953x_write_reg(chip, PCA953X_INVERT, invert); if (ret) goto out; return 0; out: return ret; } static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert) { int ret; uint16_t val = 0; /* Let every port in proper state, that could save power */ pca953x_write_reg(chip, PCA957X_PUPD, 0x0); pca953x_write_reg(chip, PCA957X_CFG, 0xffff); pca953x_write_reg(chip, PCA957X_OUT, 0x0); ret = pca953x_read_reg(chip, PCA957X_IN, &val); if (ret) goto out; ret = pca953x_read_reg(chip, PCA957X_OUT, &chip->reg_output); if (ret) goto out; ret = pca953x_read_reg(chip, PCA957X_CFG, &chip->reg_direction); if (ret) goto out; /* set platform specific polarity inversion */ pca953x_write_reg(chip, PCA957X_INVRT, invert); 
/* To enable register 6, 7 to controll pull up and pull down */ pca953x_write_reg(chip, PCA957X_BKEN, 0x202); return 0; out: return ret; } static int __devinit pca953x_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct pca953x_platform_data *pdata; struct pca953x_chip *chip; int ret = 0; chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); if (chip == NULL) return -ENOMEM; pdata = client->dev.platform_data; if (pdata == NULL) { pdata = pca953x_get_alt_pdata(client); /* * Unlike normal platform_data, this is allocated * dynamically and must be freed in the driver */ chip->dyn_pdata = pdata; } if (pdata == NULL) { dev_dbg(&client->dev, "no platform data\n"); ret = -EINVAL; goto out_failed; } chip->client = client; chip->gpio_start = pdata->gpio_base; chip->names = pdata->names; chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE); mutex_init(&chip->i2c_lock); /* initialize cached registers from their original values. * we can't share this chip with another i2c master. 
*/ pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK); if (chip->chip_type == PCA953X_TYPE) device_pca953x_init(chip, pdata->invert); else if (chip->chip_type == PCA957X_TYPE) device_pca957x_init(chip, pdata->invert); else goto out_failed; ret = pca953x_irq_setup(chip, id); if (ret) goto out_failed; ret = gpiochip_add(&chip->gpio_chip); if (ret) goto out_failed_irq; if (pdata->setup) { ret = pdata->setup(client, chip->gpio_chip.base, chip->gpio_chip.ngpio, pdata->context); if (ret < 0) dev_warn(&client->dev, "setup failed, %d\n", ret); } i2c_set_clientdata(client, chip); return 0; out_failed_irq: pca953x_irq_teardown(chip); out_failed: kfree(chip->dyn_pdata); kfree(chip); return ret; } static int pca953x_remove(struct i2c_client *client) { struct pca953x_platform_data *pdata = client->dev.platform_data; struct pca953x_chip *chip = i2c_get_clientdata(client); int ret = 0; if (pdata->teardown) { ret = pdata->teardown(client, chip->gpio_chip.base, chip->gpio_chip.ngpio, pdata->context); if (ret < 0) { dev_err(&client->dev, "%s failed, %d\n", "teardown", ret); return ret; } } ret = gpiochip_remove(&chip->gpio_chip); if (ret) { dev_err(&client->dev, "%s failed, %d\n", "gpiochip_remove()", ret); return ret; } pca953x_irq_teardown(chip); kfree(chip->dyn_pdata); kfree(chip); return 0; } static struct i2c_driver pca953x_driver = { .driver = { .name = "pca953x", }, .probe = pca953x_probe, .remove = pca953x_remove, .id_table = pca953x_id, }; static int __init pca953x_init(void) { return i2c_add_driver(&pca953x_driver); } /* register after i2c postcore initcall and before * subsys initcalls that may rely on these GPIOs */ subsys_initcall(pca953x_init); static void __exit pca953x_exit(void) { i2c_del_driver(&pca953x_driver); } module_exit(pca953x_exit); MODULE_AUTHOR("eric miao <eric.miao@marvell.com>"); MODULE_DESCRIPTION("GPIO expander driver for PCA953x"); MODULE_LICENSE("GPL");
gpl-2.0
elelinux/android_kernel_htc_pyramid
arch/mips/kernel/process.c
2310
11421
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others. * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2004 Thiemo Seufer */ #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/tick.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/personality.h> #include <linux/sys.h> #include <linux/user.h> #include <linux/init.h> #include <linux/completion.h> #include <linux/kallsyms.h> #include <linux/random.h> #include <asm/asm.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #include <asm/dsp.h> #include <asm/fpu.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/mipsregs.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/elf.h> #include <asm/isadep.h> #include <asm/inst.h> #include <asm/stacktrace.h> /* * The idle thread. There's no useful work to be done, so just try to conserve * power and have a low exit latency (ie sit in a loop waiting for somebody to * say that they'd like to reschedule) */ void __noreturn cpu_idle(void) { int cpu; /* CPU is going idle. 
*/ cpu = smp_processor_id(); /* endless idle loop with no priority at all */ while (1) { tick_nohz_stop_sched_tick(1); while (!need_resched() && cpu_online(cpu)) { #ifdef CONFIG_MIPS_MT_SMTC extern void smtc_idle_loop_hook(void); smtc_idle_loop_hook(); #endif if (cpu_wait) { /* Don't trace irqs off for idle */ stop_critical_timings(); (*cpu_wait)(); start_critical_timings(); } } #ifdef CONFIG_HOTPLUG_CPU if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) && (system_state == SYSTEM_RUNNING || system_state == SYSTEM_BOOTING)) play_dead(); #endif tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); } } asmlinkage void ret_from_fork(void); void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) { unsigned long status; /* New thread loses kernel privileges. */ status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK); #ifdef CONFIG_64BIT status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR; #endif status |= KU_USER; regs->cp0_status = status; clear_used_math(); clear_fpu_owner(); if (cpu_has_dsp) __init_dsp(); regs->cp0_epc = pc; regs->regs[29] = sp; current_thread_info()->addr_limit = USER_DS; } void exit_thread(void) { } void flush_thread(void) { } int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs; unsigned long childksp; p->set_child_tid = p->clear_child_tid = NULL; childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; preempt_disable(); if (is_fpu_owner()) save_fp(p); if (cpu_has_dsp) save_dsp(p); preempt_enable(); /* set up new TSS. */ childregs = (struct pt_regs *) childksp - 1; /* Put the stack after the struct pt_regs. 
*/ childksp = (unsigned long) childregs; *childregs = *regs; childregs->regs[7] = 0; /* Clear error flag */ childregs->regs[2] = 0; /* Child gets zero as return value */ if (childregs->cp0_status & ST0_CU0) { childregs->regs[28] = (unsigned long) ti; childregs->regs[29] = childksp; ti->addr_limit = KERNEL_DS; } else { childregs->regs[29] = usp; ti->addr_limit = USER_DS; } p->thread.reg29 = (unsigned long) childregs; p->thread.reg31 = (unsigned long) ret_from_fork; /* * New tasks lose permission to use the fpu. This accelerates context * switching for most programs since they don't use the fpu. */ p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); #ifdef CONFIG_MIPS_MT_SMTC /* * SMTC restores TCStatus after Status, and the CU bits * are aliased there. */ childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); #endif clear_tsk_thread_flag(p, TIF_USEDFPU); #ifdef CONFIG_MIPS_MT_FPAFF clear_tsk_thread_flag(p, TIF_FPUBOUND); #endif /* CONFIG_MIPS_MT_FPAFF */ if (clone_flags & CLONE_SETTLS) ti->tp_value = regs->regs[7]; return 0; } /* Fill in the fpu structure for a core dump.. 
*/ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) { memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu)); return 1; } void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs) { int i; for (i = 0; i < EF_R0; i++) gp[i] = 0; gp[EF_R0] = 0; for (i = 1; i <= 31; i++) gp[EF_R0 + i] = regs->regs[i]; gp[EF_R26] = 0; gp[EF_R27] = 0; gp[EF_LO] = regs->lo; gp[EF_HI] = regs->hi; gp[EF_CP0_EPC] = regs->cp0_epc; gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr; gp[EF_CP0_STATUS] = regs->cp0_status; gp[EF_CP0_CAUSE] = regs->cp0_cause; #ifdef EF_UNUSED0 gp[EF_UNUSED0] = 0; #endif } int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) { elf_dump_regs(*regs, task_pt_regs(tsk)); return 1; } int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) { memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu)); return 1; } /* * Create a kernel thread */ static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *)) { do_exit(fn(arg)); } long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); regs.regs[4] = (unsigned long) arg; regs.regs[5] = (unsigned long) fn; regs.cp0_epc = (unsigned long) kernel_thread_helper; regs.cp0_status = read_c0_status(); #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) | ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2); #else regs.cp0_status |= ST0_EXL; #endif /* Ok, create the new process.. 
*/ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } /* * */ struct mips_frame_info { void *func; unsigned long func_size; int frame_size; int pc_offset; }; static inline int is_ra_save_ins(union mips_instruction *ip) { /* sw / sd $ra, offset($sp) */ return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && ip->i_format.rs == 29 && ip->i_format.rt == 31; } static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) { if (ip->j_format.opcode == jal_op) return 1; if (ip->r_format.opcode != spec_op) return 0; return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; } static inline int is_sp_move_ins(union mips_instruction *ip) { /* addiu/daddiu sp,sp,-imm */ if (ip->i_format.rs != 29 || ip->i_format.rt != 29) return 0; if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) return 1; return 0; } static int get_frame_info(struct mips_frame_info *info) { union mips_instruction *ip = info->func; unsigned max_insns = info->func_size / sizeof(union mips_instruction); unsigned i; info->pc_offset = -1; info->frame_size = 0; if (!ip) goto err; if (max_insns == 0) max_insns = 128U; /* unknown function size */ max_insns = min(128U, max_insns); for (i = 0; i < max_insns; i++, ip++) { if (is_jal_jalr_jr_ins(ip)) break; if (!info->frame_size) { if (is_sp_move_ins(ip)) info->frame_size = - ip->i_format.simmediate; continue; } if (info->pc_offset == -1 && is_ra_save_ins(ip)) { info->pc_offset = ip->i_format.simmediate / sizeof(long); break; } } if (info->frame_size && info->pc_offset >= 0) /* nested */ return 0; if (info->pc_offset < 0) /* leaf */ return 1; /* prologue seems boggus... 
*/ err: return -1; } static struct mips_frame_info schedule_mfi __read_mostly; static int __init frame_info_init(void) { unsigned long size = 0; #ifdef CONFIG_KALLSYMS unsigned long ofs; kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); #endif schedule_mfi.func = schedule; schedule_mfi.func_size = size; get_frame_info(&schedule_mfi); /* * Without schedule() frame info, result given by * thread_saved_pc() and get_wchan() are not reliable. */ if (schedule_mfi.pc_offset < 0) printk("Can't analyze schedule() prologue at %p\n", schedule); return 0; } arch_initcall(frame_info_init); /* * Return saved PC of a blocked thread. */ unsigned long thread_saved_pc(struct task_struct *tsk) { struct thread_struct *t = &tsk->thread; /* New born processes are a special case */ if (t->reg31 == (unsigned long) ret_from_fork) return t->reg31; if (schedule_mfi.pc_offset < 0) return 0; return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset]; } #ifdef CONFIG_KALLSYMS /* used by show_backtrace() */ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, unsigned long pc, unsigned long *ra) { unsigned long stack_page; struct mips_frame_info info; unsigned long size, ofs; int leaf; extern void ret_from_irq(void); extern void ret_from_exception(void); stack_page = (unsigned long)task_stack_page(task); if (!stack_page) return 0; /* * If we reached the bottom of interrupt context, * return saved pc in pt_regs. 
*/ if (pc == (unsigned long)ret_from_irq || pc == (unsigned long)ret_from_exception) { struct pt_regs *regs; if (*sp >= stack_page && *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { regs = (struct pt_regs *)*sp; pc = regs->cp0_epc; if (__kernel_text_address(pc)) { *sp = regs->regs[29]; *ra = regs->regs[31]; return pc; } } return 0; } if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) return 0; /* * Return ra if an exception occurred at the first instruction */ if (unlikely(ofs == 0)) { pc = *ra; *ra = 0; return pc; } info.func = (void *)(pc - ofs); info.func_size = ofs; /* analyze from start to ofs */ leaf = get_frame_info(&info); if (leaf < 0) return 0; if (*sp < stack_page || *sp + info.frame_size > stack_page + THREAD_SIZE - 32) return 0; if (leaf) /* * For some extreme cases, get_frame_info() can * consider wrongly a nested function as a leaf * one. In that cases avoid to return always the * same value. */ pc = pc != *ra ? *ra : 0; else pc = ((unsigned long *)(*sp))[info.pc_offset]; *sp += info.frame_size; *ra = 0; return __kernel_text_address(pc) ? pc : 0; } #endif /* * get_wchan - a maintenance nightmare^W^Wpain in the ass ... */ unsigned long get_wchan(struct task_struct *task) { unsigned long pc = 0; #ifdef CONFIG_KALLSYMS unsigned long sp; unsigned long ra = 0; #endif if (!task || task == current || task->state == TASK_RUNNING) goto out; if (!task_stack_page(task)) goto out; pc = thread_saved_pc(task); #ifdef CONFIG_KALLSYMS sp = task->thread.reg29 + schedule_mfi.frame_size; while (in_sched_functions(pc)) pc = unwind_stack(task, &sp, pc, &ra); #endif out: return pc; } /* * Don't forget that the stack pointer must be aligned on a 8 bytes * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. */ unsigned long arch_align_stack(unsigned long sp) { if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) sp -= get_random_int() & ~PAGE_MASK; return sp & ALMASK; }
gpl-2.0
CyanogenMod/android_kernel_oppo_msm8939
arch/arm/mach-shmobile/clock.c
2566
1531
/* * SH-Mobile Clock Framework * * Copyright (C) 2010 Magnus Damm * * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sh_clk.h> #include <linux/export.h> #include <mach/clock.h> #include <mach/common.h> unsigned long shmobile_fixed_ratio_clk_recalc(struct clk *clk) { struct clk_ratio *p = clk->priv; return clk->parent->rate / p->div * p->mul; }; struct sh_clk_ops shmobile_fixed_ratio_clk_ops = { .recalc = shmobile_fixed_ratio_clk_recalc, }; int __init shmobile_clk_init(void) { /* Kick the child clocks.. */ recalculate_root_clocks(); /* Enable the necessary init clocks */ clk_enable_init_clocks(); return 0; } int __clk_get(struct clk *clk) { return 1; } EXPORT_SYMBOL(__clk_get); void __clk_put(struct clk *clk) { } EXPORT_SYMBOL(__clk_put);
gpl-2.0
TWRP-J5/android_kernel_samsung_j5lte
net/netrom/nr_route.c
2822
23486
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <net/arp.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/netfilter.h> #include <linux/init.h> #include <linux/spinlock.h> #include <net/netrom.h> #include <linux/seq_file.h> #include <linux/export.h> static unsigned int nr_neigh_no = 1; static HLIST_HEAD(nr_node_list); static DEFINE_SPINLOCK(nr_node_list_lock); static HLIST_HEAD(nr_neigh_list); static DEFINE_SPINLOCK(nr_neigh_list_lock); static struct nr_node *nr_node_get(ax25_address *callsign) { struct nr_node *found = NULL; struct nr_node *nr_node; spin_lock_bh(&nr_node_list_lock); nr_node_for_each(nr_node, &nr_node_list) if (ax25cmp(callsign, &nr_node->callsign) == 0) { nr_node_hold(nr_node); found = nr_node; break; } spin_unlock_bh(&nr_node_list_lock); return found; } static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign, struct net_device *dev) { struct nr_neigh *found = NULL; struct nr_neigh *nr_neigh; spin_lock_bh(&nr_neigh_list_lock); nr_neigh_for_each(nr_neigh, &nr_neigh_list) if (ax25cmp(callsign, &nr_neigh->callsign) == 0 && 
nr_neigh->dev == dev) { nr_neigh_hold(nr_neigh); found = nr_neigh; break; } spin_unlock_bh(&nr_neigh_list_lock); return found; } static void nr_remove_neigh(struct nr_neigh *); /* * Add a new route to a node, and in the process add the node and the * neighbour if it is new. */ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev, int quality, int obs_count) { struct nr_node *nr_node; struct nr_neigh *nr_neigh; struct nr_route nr_route; int i, found; struct net_device *odev; if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */ dev_put(odev); return -EINVAL; } nr_node = nr_node_get(nr); nr_neigh = nr_neigh_get_dev(ax25, dev); /* * The L2 link to a neighbour has failed in the past * and now a frame comes from this neighbour. We assume * it was a temporary trouble with the link and reset the * routes now (and not wait for a node broadcast). */ if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) { struct nr_node *nr_nodet; spin_lock_bh(&nr_node_list_lock); nr_node_for_each(nr_nodet, &nr_node_list) { nr_node_lock(nr_nodet); for (i = 0; i < nr_nodet->count; i++) if (nr_nodet->routes[i].neighbour == nr_neigh) if (i < nr_nodet->which) nr_nodet->which = i; nr_node_unlock(nr_nodet); } spin_unlock_bh(&nr_node_list_lock); } if (nr_neigh != NULL) nr_neigh->failed = 0; if (quality == 0 && nr_neigh != NULL && nr_node != NULL) { nr_neigh_put(nr_neigh); nr_node_put(nr_node); return 0; } if (nr_neigh == NULL) { if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) { if (nr_node) nr_node_put(nr_node); return -ENOMEM; } nr_neigh->callsign = *ax25; nr_neigh->digipeat = NULL; nr_neigh->ax25 = NULL; nr_neigh->dev = dev; nr_neigh->quality = sysctl_netrom_default_path_quality; nr_neigh->locked = 0; nr_neigh->count = 0; nr_neigh->number = nr_neigh_no++; nr_neigh->failed = 0; atomic_set(&nr_neigh->refcount, 1); if (ax25_digi != NULL && ax25_digi->ndigi > 0) { 
nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi), GFP_KERNEL); if (nr_neigh->digipeat == NULL) { kfree(nr_neigh); if (nr_node) nr_node_put(nr_node); return -ENOMEM; } } spin_lock_bh(&nr_neigh_list_lock); hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list); nr_neigh_hold(nr_neigh); spin_unlock_bh(&nr_neigh_list_lock); } if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked) nr_neigh->quality = quality; if (nr_node == NULL) { if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) { if (nr_neigh) nr_neigh_put(nr_neigh); return -ENOMEM; } nr_node->callsign = *nr; strcpy(nr_node->mnemonic, mnemonic); nr_node->which = 0; nr_node->count = 1; atomic_set(&nr_node->refcount, 1); spin_lock_init(&nr_node->node_lock); nr_node->routes[0].quality = quality; nr_node->routes[0].obs_count = obs_count; nr_node->routes[0].neighbour = nr_neigh; nr_neigh_hold(nr_neigh); nr_neigh->count++; spin_lock_bh(&nr_node_list_lock); hlist_add_head(&nr_node->node_node, &nr_node_list); /* refcount initialized at 1 */ spin_unlock_bh(&nr_node_list_lock); return 0; } nr_node_lock(nr_node); if (quality != 0) strcpy(nr_node->mnemonic, mnemonic); for (found = 0, i = 0; i < nr_node->count; i++) { if (nr_node->routes[i].neighbour == nr_neigh) { nr_node->routes[i].quality = quality; nr_node->routes[i].obs_count = obs_count; found = 1; break; } } if (!found) { /* We have space at the bottom, slot it in */ if (nr_node->count < 3) { nr_node->routes[2] = nr_node->routes[1]; nr_node->routes[1] = nr_node->routes[0]; nr_node->routes[0].quality = quality; nr_node->routes[0].obs_count = obs_count; nr_node->routes[0].neighbour = nr_neigh; nr_node->which++; nr_node->count++; nr_neigh_hold(nr_neigh); nr_neigh->count++; } else { /* It must be better than the worst */ if (quality > nr_node->routes[2].quality) { nr_node->routes[2].neighbour->count--; nr_neigh_put(nr_node->routes[2].neighbour); if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked) 
nr_remove_neigh(nr_node->routes[2].neighbour); nr_node->routes[2].quality = quality; nr_node->routes[2].obs_count = obs_count; nr_node->routes[2].neighbour = nr_neigh; nr_neigh_hold(nr_neigh); nr_neigh->count++; } } } /* Now re-sort the routes in quality order */ switch (nr_node->count) { case 3: if (nr_node->routes[1].quality > nr_node->routes[0].quality) { switch (nr_node->which) { case 0: nr_node->which = 1; break; case 1: nr_node->which = 0; break; } nr_route = nr_node->routes[0]; nr_node->routes[0] = nr_node->routes[1]; nr_node->routes[1] = nr_route; } if (nr_node->routes[2].quality > nr_node->routes[1].quality) { switch (nr_node->which) { case 1: nr_node->which = 2; break; case 2: nr_node->which = 1; break; default: break; } nr_route = nr_node->routes[1]; nr_node->routes[1] = nr_node->routes[2]; nr_node->routes[2] = nr_route; } case 2: if (nr_node->routes[1].quality > nr_node->routes[0].quality) { switch (nr_node->which) { case 0: nr_node->which = 1; break; case 1: nr_node->which = 0; break; default: break; } nr_route = nr_node->routes[0]; nr_node->routes[0] = nr_node->routes[1]; nr_node->routes[1] = nr_route; } case 1: break; } for (i = 0; i < nr_node->count; i++) { if (nr_node->routes[i].neighbour == nr_neigh) { if (i < nr_node->which) nr_node->which = i; break; } } nr_neigh_put(nr_neigh); nr_node_unlock(nr_node); nr_node_put(nr_node); return 0; } static inline void __nr_remove_node(struct nr_node *nr_node) { hlist_del_init(&nr_node->node_node); nr_node_put(nr_node); } #define nr_remove_node_locked(__node) \ __nr_remove_node(__node) static void nr_remove_node(struct nr_node *nr_node) { spin_lock_bh(&nr_node_list_lock); __nr_remove_node(nr_node); spin_unlock_bh(&nr_node_list_lock); } static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh) { hlist_del_init(&nr_neigh->neigh_node); nr_neigh_put(nr_neigh); } #define nr_remove_neigh_locked(__neigh) \ __nr_remove_neigh(__neigh) static void nr_remove_neigh(struct nr_neigh *nr_neigh) { 
spin_lock_bh(&nr_neigh_list_lock); __nr_remove_neigh(nr_neigh); spin_unlock_bh(&nr_neigh_list_lock); } /* * "Delete" a node. Strictly speaking remove a route to a node. The node * is only deleted if no routes are left to it. */ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev) { struct nr_node *nr_node; struct nr_neigh *nr_neigh; int i; nr_node = nr_node_get(callsign); if (nr_node == NULL) return -EINVAL; nr_neigh = nr_neigh_get_dev(neighbour, dev); if (nr_neigh == NULL) { nr_node_put(nr_node); return -EINVAL; } nr_node_lock(nr_node); for (i = 0; i < nr_node->count; i++) { if (nr_node->routes[i].neighbour == nr_neigh) { nr_neigh->count--; nr_neigh_put(nr_neigh); if (nr_neigh->count == 0 && !nr_neigh->locked) nr_remove_neigh(nr_neigh); nr_neigh_put(nr_neigh); nr_node->count--; if (nr_node->count == 0) { nr_remove_node(nr_node); } else { switch (i) { case 0: nr_node->routes[0] = nr_node->routes[1]; case 1: nr_node->routes[1] = nr_node->routes[2]; case 2: break; } nr_node_put(nr_node); } nr_node_unlock(nr_node); return 0; } } nr_neigh_put(nr_neigh); nr_node_unlock(nr_node); nr_node_put(nr_node); return -EINVAL; } /* * Lock a neighbour with a quality. 
*/ static int __must_check nr_add_neigh(ax25_address *callsign, ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality) { struct nr_neigh *nr_neigh; nr_neigh = nr_neigh_get_dev(callsign, dev); if (nr_neigh) { nr_neigh->quality = quality; nr_neigh->locked = 1; nr_neigh_put(nr_neigh); return 0; } if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) return -ENOMEM; nr_neigh->callsign = *callsign; nr_neigh->digipeat = NULL; nr_neigh->ax25 = NULL; nr_neigh->dev = dev; nr_neigh->quality = quality; nr_neigh->locked = 1; nr_neigh->count = 0; nr_neigh->number = nr_neigh_no++; nr_neigh->failed = 0; atomic_set(&nr_neigh->refcount, 1); if (ax25_digi != NULL && ax25_digi->ndigi > 0) { nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi), GFP_KERNEL); if (nr_neigh->digipeat == NULL) { kfree(nr_neigh); return -ENOMEM; } } spin_lock_bh(&nr_neigh_list_lock); hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list); /* refcount is initialized at 1 */ spin_unlock_bh(&nr_neigh_list_lock); return 0; } /* * "Delete" a neighbour. The neighbour is only removed if the number * of nodes that may use it is zero. */ static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality) { struct nr_neigh *nr_neigh; nr_neigh = nr_neigh_get_dev(callsign, dev); if (nr_neigh == NULL) return -EINVAL; nr_neigh->quality = quality; nr_neigh->locked = 0; if (nr_neigh->count == 0) nr_remove_neigh(nr_neigh); nr_neigh_put(nr_neigh); return 0; } /* * Decrement the obsolescence count by one. If a route is reduced to a * count of zero, remove it. Also remove any unlocked neighbours with * zero nodes routing via it. 
*/ static int nr_dec_obs(void) { struct nr_neigh *nr_neigh; struct nr_node *s; struct hlist_node *nodet; int i; spin_lock_bh(&nr_node_list_lock); nr_node_for_each_safe(s, nodet, &nr_node_list) { nr_node_lock(s); for (i = 0; i < s->count; i++) { switch (s->routes[i].obs_count) { case 0: /* A locked entry */ break; case 1: /* From 1 -> 0 */ nr_neigh = s->routes[i].neighbour; nr_neigh->count--; nr_neigh_put(nr_neigh); if (nr_neigh->count == 0 && !nr_neigh->locked) nr_remove_neigh(nr_neigh); s->count--; switch (i) { case 0: s->routes[0] = s->routes[1]; /* Fallthrough */ case 1: s->routes[1] = s->routes[2]; case 2: break; } break; default: s->routes[i].obs_count--; break; } } if (s->count <= 0) nr_remove_node_locked(s); nr_node_unlock(s); } spin_unlock_bh(&nr_node_list_lock); return 0; } /* * A device has been removed. Remove its routes and neighbours. */ void nr_rt_device_down(struct net_device *dev) { struct nr_neigh *s; struct hlist_node *nodet, *node2t; struct nr_node *t; int i; spin_lock_bh(&nr_neigh_list_lock); nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) { if (s->dev == dev) { spin_lock_bh(&nr_node_list_lock); nr_node_for_each_safe(t, node2t, &nr_node_list) { nr_node_lock(t); for (i = 0; i < t->count; i++) { if (t->routes[i].neighbour == s) { t->count--; switch (i) { case 0: t->routes[0] = t->routes[1]; case 1: t->routes[1] = t->routes[2]; case 2: break; } } } if (t->count <= 0) nr_remove_node_locked(t); nr_node_unlock(t); } spin_unlock_bh(&nr_node_list_lock); nr_remove_neigh_locked(s); } } spin_unlock_bh(&nr_neigh_list_lock); } /* * Check that the device given is a valid AX.25 interface that is "up". * Or a valid ethernet interface with an AX.25 callsign binding. 
*/ static struct net_device *nr_ax25_dev_get(char *devname) { struct net_device *dev; if ((dev = dev_get_by_name(&init_net, devname)) == NULL) return NULL; if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25) return dev; dev_put(dev); return NULL; } /* * Find the first active NET/ROM device, usually "nr0". */ struct net_device *nr_dev_first(void) { struct net_device *dev, *first = NULL; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) first = dev; } if (first) dev_hold(first); rcu_read_unlock(); return first; } /* * Find the NET/ROM device for the given callsign. */ struct net_device *nr_dev_get(ax25_address *addr) { struct net_device *dev; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) { dev_hold(dev); goto out; } } dev = NULL; out: rcu_read_unlock(); return dev; } static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis, ax25_address *digipeaters) { int i; if (ndigis == 0) return NULL; for (i = 0; i < ndigis; i++) { digi->calls[i] = digipeaters[i]; digi->repeated[i] = 0; } digi->ndigi = ndigis; digi->lastrepeat = -1; return digi; } /* * Handle the ioctls that control the routing functions. 
*/
/*
 * nr_rt_ioctl - handle the routing ioctls.
 * @cmd: SIOCADDRT, SIOCDELRT or SIOCNRDECOBS
 * @arg: user pointer to a struct nr_route_struct (ADD/DEL only)
 *
 * Returns 0 on success or a negative errno.
 */
int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
	struct nr_route_struct nr_route;
	struct net_device *dev;
	ax25_digi digi;
	int ret;

	switch (cmd) {
	case SIOCADDRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		/* Bound the digipeater count before nr_call_to_digi() copies it. */
		if (nr_route.ndigis > AX25_MAX_DIGIS)
			return -EINVAL;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			/* Reject a mnemonic that is not NUL-terminated. */
			if (strnlen(nr_route.mnemonic, 7) == 7) {
				ret = -EINVAL;
				break;
			}
			ret = nr_add_node(&nr_route.callsign,
				nr_route.mnemonic,
				&nr_route.neighbour,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality,
				nr_route.obs_count);
			break;
		case NETROM_NEIGH:
			ret = nr_add_neigh(&nr_route.callsign,
				nr_call_to_digi(&digi, nr_route.ndigis,
						nr_route.digipeaters),
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);	/* balance nr_ax25_dev_get() */
		return ret;

	case SIOCDELRT:
		if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
			return -EFAULT;
		if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
			return -EINVAL;
		switch (nr_route.type) {
		case NETROM_NODE:
			ret = nr_del_node(&nr_route.callsign,
				&nr_route.neighbour, dev);
			break;
		case NETROM_NEIGH:
			ret = nr_del_neigh(&nr_route.callsign,
				dev, nr_route.quality);
			break;
		default:
			ret = -EINVAL;
		}
		dev_put(dev);	/* balance nr_ax25_dev_get() */
		return ret;

	case SIOCNRDECOBS:
		return nr_dec_obs();

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * A level 2 link has timed out, therefore it appears to be a poor link,
 * then don't use that neighbour until it is reset.
*/
/*
 * nr_link_failed - an AX.25 (level 2) link to a neighbour has failed.
 * Drops our reference to the failed ax25_cb and, once the failure count
 * reaches sysctl_netrom_link_fails_count, advances every node that was
 * using this neighbour to its next route. @reason is unused here.
 */
void nr_link_failed(ax25_cb *ax25, int reason)
{
	struct nr_neigh *s, *nr_neigh = NULL;
	struct nr_node *nr_node = NULL;

	/* Find (and take a reference on) the neighbour owning this ax25_cb. */
	spin_lock_bh(&nr_neigh_list_lock);
	nr_neigh_for_each(s, &nr_neigh_list) {
		if (s->ax25 == ax25) {
			nr_neigh_hold(s);
			nr_neigh = s;
			break;
		}
	}
	spin_unlock_bh(&nr_neigh_list_lock);

	if (nr_neigh == NULL)
		return;

	nr_neigh->ax25 = NULL;
	ax25_cb_put(ax25);

	/* Below the failure threshold: keep using this neighbour. */
	if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
		nr_neigh_put(nr_neigh);
		return;
	}

	/* Too many failures: step every node past this neighbour. */
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each(nr_node, &nr_node_list) {
		nr_node_lock(nr_node);
		if (nr_node->which < nr_node->count &&
		    nr_node->routes[nr_node->which].neighbour == nr_neigh)
			nr_node->which++;
		nr_node_unlock(nr_node);
	}
	spin_unlock_bh(&nr_node_list_lock);
	nr_neigh_put(nr_neigh);
}

/*
 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
 * indicates an internally generated frame.
 */
int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	ax25_address *nr_src, *nr_dest;
	struct nr_neigh *nr_neigh;
	struct nr_node *nr_node;
	struct net_device *dev;
	unsigned char *dptr;
	ax25_cb *ax25s;
	int ret;
	struct sk_buff *skbn;

	/* NET/ROM header: source callsign at offset 0, destination at 7. */
	nr_src  = (ax25_address *)(skb->data + 0);
	nr_dest = (ax25_address *)(skb->data + 7);

	/* Learn a route back to the sender of a received frame. */
	if (ax25 != NULL) {
		ret = nr_add_node(nr_src, "", &ax25->dest_addr,
				  ax25->digipeat, ax25->ax25_dev->dev, 0,
				  sysctl_netrom_obsolescence_count_initialiser);
		if (ret)
			return ret;
	}

	if ((dev = nr_dev_get(nr_dest)) != NULL) {	/* Its for me */
		if (ax25 == NULL)			/* Its from me */
			ret = nr_loopback_queue(skb);
		else
			ret = nr_rx_frame(skb, dev);
		dev_put(dev);
		return ret;
	}

	/* Not for us, and forwarding of received frames is disabled. */
	if (!sysctl_netrom_routing_control && ax25 != NULL)
		return 0;

	/* Its Time-To-Live has expired */
	if (skb->data[14] == 1) {
		return 0;
	}

	nr_node = nr_node_get(nr_dest);
	if (nr_node == NULL)
		return 0;
	nr_node_lock(nr_node);

	/* No usable route left towards the destination. */
	if (nr_node->which >= nr_node->count) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	nr_neigh = nr_node->routes[nr_node->which].neighbour;

	if ((dev = nr_dev_first()) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		return 0;
	}

	/* We are going to change the netrom headers so we should get our
	   own skb, we also did not know until now how much header space
	   we had to reserve... - RXQ */
	if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
		nr_node_unlock(nr_node);
		nr_node_put(nr_node);
		dev_put(dev);
		return 0;
	}
	kfree_skb(skb);
	skb=skbn;
	skb->data[14]--;	/* decrement TTL before forwarding */

	dptr  = skb_push(skb, 1);
	*dptr = AX25_P_NETROM;

	/* ax25_send_frame() hands back the (possibly new) ax25_cb for this
	 * link; release the reference to the previous one afterwards. */
	ax25s = nr_neigh->ax25;
	nr_neigh->ax25 = ax25_send_frame(skb, 256,
					 (ax25_address *)dev->dev_addr,
					 &nr_neigh->callsign,
					 nr_neigh->digipeat, nr_neigh->dev);
	if (ax25s)
		ax25_cb_put(ax25s);

	dev_put(dev);
	ret = (nr_neigh->ax25 != NULL);
	nr_node_unlock(nr_node);
	nr_node_put(nr_node);

	return ret;
}

#ifdef CONFIG_PROC_FS

/* seq_file iteration over the node list; the list lock is held for the
 * whole walk (taken in start, released in stop). */
static void *nr_node_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&nr_node_list_lock);
	return seq_hlist_start_head(&nr_node_list, *pos);
}

static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_node_list, pos);
}

static void nr_node_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_node_list_lock);
}

/* Print the header line or one node entry with all of its routes. */
static int nr_node_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
	else {
		struct nr_node *nr_node = hlist_entry(v, struct nr_node,
						      node_node);

		nr_node_lock(nr_node);
		seq_printf(seq, "%-9s %-7s %d %d",
			ax2asc(buf, &nr_node->callsign),
			(nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
			nr_node->which + 1,
			nr_node->count);

		for (i = 0; i < nr_node->count; i++) {
			seq_printf(seq, " %3d %d %05d",
				nr_node->routes[i].quality,
				nr_node->routes[i].obs_count,
				nr_node->routes[i].neighbour->number);
		}
		nr_node_unlock(nr_node);

		seq_puts(seq, "\n");
	}
	return 0;
}

static const struct seq_operations nr_node_seqops = {
	.start = nr_node_start,
	.next = nr_node_next,
	.stop = nr_node_stop,
	.show = nr_node_show,
};

static int nr_node_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_node_seqops);
}

const struct file_operations nr_nodes_fops = {
	.owner = THIS_MODULE,
	.open = nr_node_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* seq_file iteration over the neighbour list, analogous to the node one. */
static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock_bh(&nr_neigh_list_lock);
	return seq_hlist_start_head(&nr_neigh_list, *pos);
}

static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &nr_neigh_list, pos);
}

static void nr_neigh_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&nr_neigh_list_lock);
}

/* Print the header line or one neighbour entry with its digipeaters. */
static int nr_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
	else {
		struct nr_neigh *nr_neigh;

		nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
		seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
			nr_neigh->number,
			ax2asc(buf, &nr_neigh->callsign),
			nr_neigh->dev ? nr_neigh->dev->name : "???",
			nr_neigh->quality,
			nr_neigh->locked,
			nr_neigh->count,
			nr_neigh->failed);

		if (nr_neigh->digipeat != NULL) {
			for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s",
					   ax2asc(buf, &nr_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

static const struct seq_operations nr_neigh_seqops = {
	.start = nr_neigh_start,
	.next = nr_neigh_next,
	.stop = nr_neigh_stop,
	.show = nr_neigh_show,
};

static int nr_neigh_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nr_neigh_seqops);
}

const struct file_operations nr_neigh_fops = {
	.owner = THIS_MODULE,
	.open = nr_neigh_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif

/*
 * Free all memory associated with the nodes and routes lists.
 */
void __exit nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node *t = NULL;
	struct hlist_node *nodet;

	spin_lock_bh(&nr_neigh_list_lock);
	spin_lock_bh(&nr_node_list_lock);
	nr_node_for_each_safe(t, nodet, &nr_node_list) {
		nr_node_lock(t);
		nr_remove_node_locked(t);
		nr_node_unlock(t);
	}
	nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
		/* Drop every node reference the neighbour still holds. */
		while(s->count) {
			s->count--;
			nr_neigh_put(s);
		}
		nr_remove_neigh_locked(s);
	}
	spin_unlock_bh(&nr_node_list_lock);
	spin_unlock_bh(&nr_neigh_list_lock);
}
gpl-2.0
VanirAOSP/kernel_samsung_codinalte
sound/pci/ice1712/ice1724.c
3590
78725
/* * ALSA driver for VT1724 ICEnsemble ICE1724 / VIA VT1724 (Envy24HT) * VIA VT1720 (Envy24PT) * * Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz> * 2002 James Stafford <jstafford@ampltd.com> * 2003 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/info.h> #include <sound/rawmidi.h> #include <sound/initval.h> #include <sound/asoundef.h> #include "ice1712.h" #include "envy24ht.h" /* lowlevel routines */ #include "amp.h" #include "revo.h" #include "aureon.h" #include "vt1720_mobo.h" #include "pontis.h" #include "prodigy192.h" #include "prodigy_hifi.h" #include "juli.h" #include "maya44.h" #include "phase.h" #include "wtm.h" #include "se.h" #include "quartet.h" MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("VIA ICEnsemble ICE1724/1720 (Envy24HT/PT)"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{" REVO_DEVICE_DESC AMP_AUDIO2000_DEVICE_DESC AUREON_DEVICE_DESC VT1720_MOBO_DEVICE_DESC PONTIS_DEVICE_DESC PRODIGY192_DEVICE_DESC PRODIGY_HIFI_DEVICE_DESC JULI_DEVICE_DESC MAYA44_DEVICE_DESC PHASE_DEVICE_DESC WTM_DEVICE_DESC 
SE_DEVICE_DESC QTET_DEVICE_DESC "{VIA,VT1720}," "{VIA,VT1724}," "{ICEnsemble,Generic ICE1724}," "{ICEnsemble,Generic Envy24HT}" "{ICEnsemble,Generic Envy24PT}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static char *model[SNDRV_CARDS]; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for ICE1724 soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for ICE1724 soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable ICE1724 soundcard."); module_param_array(model, charp, NULL, 0444); MODULE_PARM_DESC(model, "Use the given board model."); /* Both VT1720 and VT1724 have the same PCI IDs */ static DEFINE_PCI_DEVICE_TABLE(snd_vt1724_ids) = { { PCI_VDEVICE(ICE, PCI_DEVICE_ID_VT1724), 0 }, { 0, } }; MODULE_DEVICE_TABLE(pci, snd_vt1724_ids); static int PRO_RATE_LOCKED; static int PRO_RATE_RESET = 1; static unsigned int PRO_RATE_DEFAULT = 44100; static char *ext_clock_names[1] = { "IEC958 In" }; /* * Basic I/O */ /* * default rates, default clock routines */ /* check whether the clock mode is spdif-in */ static inline int stdclock_is_spdif_master(struct snd_ice1712 *ice) { return (inb(ICEMT1724(ice, RATE)) & VT1724_SPDIF_MASTER) ? 
1 : 0; } /* * locking rate makes sense only for internal clock mode */ static inline int is_pro_rate_locked(struct snd_ice1712 *ice) { return (!ice->is_spdif_master(ice)) && PRO_RATE_LOCKED; } /* * ac97 section */ static unsigned char snd_vt1724_ac97_ready(struct snd_ice1712 *ice) { unsigned char old_cmd; int tm; for (tm = 0; tm < 0x10000; tm++) { old_cmd = inb(ICEMT1724(ice, AC97_CMD)); if (old_cmd & (VT1724_AC97_WRITE | VT1724_AC97_READ)) continue; if (!(old_cmd & VT1724_AC97_READY)) continue; return old_cmd; } snd_printd(KERN_ERR "snd_vt1724_ac97_ready: timeout\n"); return old_cmd; } static int snd_vt1724_ac97_wait_bit(struct snd_ice1712 *ice, unsigned char bit) { int tm; for (tm = 0; tm < 0x10000; tm++) if ((inb(ICEMT1724(ice, AC97_CMD)) & bit) == 0) return 0; snd_printd(KERN_ERR "snd_vt1724_ac97_wait_bit: timeout\n"); return -EIO; } static void snd_vt1724_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct snd_ice1712 *ice = ac97->private_data; unsigned char old_cmd; old_cmd = snd_vt1724_ac97_ready(ice); old_cmd &= ~VT1724_AC97_ID_MASK; old_cmd |= ac97->num; outb(reg, ICEMT1724(ice, AC97_INDEX)); outw(val, ICEMT1724(ice, AC97_DATA)); outb(old_cmd | VT1724_AC97_WRITE, ICEMT1724(ice, AC97_CMD)); snd_vt1724_ac97_wait_bit(ice, VT1724_AC97_WRITE); } static unsigned short snd_vt1724_ac97_read(struct snd_ac97 *ac97, unsigned short reg) { struct snd_ice1712 *ice = ac97->private_data; unsigned char old_cmd; old_cmd = snd_vt1724_ac97_ready(ice); old_cmd &= ~VT1724_AC97_ID_MASK; old_cmd |= ac97->num; outb(reg, ICEMT1724(ice, AC97_INDEX)); outb(old_cmd | VT1724_AC97_READ, ICEMT1724(ice, AC97_CMD)); if (snd_vt1724_ac97_wait_bit(ice, VT1724_AC97_READ) < 0) return ~0; return inw(ICEMT1724(ice, AC97_DATA)); } /* * GPIO operations */ /* set gpio direction 0 = read, 1 = write */ static void snd_vt1724_set_gpio_dir(struct snd_ice1712 *ice, unsigned int data) { outl(data, ICEREG1724(ice, GPIO_DIRECTION)); inw(ICEREG1724(ice, GPIO_DIRECTION)); /* 
dummy read for pci-posting */ } /* get gpio direction 0 = read, 1 = write */ static unsigned int snd_vt1724_get_gpio_dir(struct snd_ice1712 *ice) { return inl(ICEREG1724(ice, GPIO_DIRECTION)); } /* set the gpio mask (0 = writable) */ static void snd_vt1724_set_gpio_mask(struct snd_ice1712 *ice, unsigned int data) { outw(data, ICEREG1724(ice, GPIO_WRITE_MASK)); if (!ice->vt1720) /* VT1720 supports only 16 GPIO bits */ outb((data >> 16) & 0xff, ICEREG1724(ice, GPIO_WRITE_MASK_22)); inw(ICEREG1724(ice, GPIO_WRITE_MASK)); /* dummy read for pci-posting */ } static unsigned int snd_vt1724_get_gpio_mask(struct snd_ice1712 *ice) { unsigned int mask; if (!ice->vt1720) mask = (unsigned int)inb(ICEREG1724(ice, GPIO_WRITE_MASK_22)); else mask = 0; mask = (mask << 16) | inw(ICEREG1724(ice, GPIO_WRITE_MASK)); return mask; } static void snd_vt1724_set_gpio_data(struct snd_ice1712 *ice, unsigned int data) { outw(data, ICEREG1724(ice, GPIO_DATA)); if (!ice->vt1720) outb(data >> 16, ICEREG1724(ice, GPIO_DATA_22)); inw(ICEREG1724(ice, GPIO_DATA)); /* dummy read for pci-posting */ } static unsigned int snd_vt1724_get_gpio_data(struct snd_ice1712 *ice) { unsigned int data; if (!ice->vt1720) data = (unsigned int)inb(ICEREG1724(ice, GPIO_DATA_22)); else data = 0; data = (data << 16) | inw(ICEREG1724(ice, GPIO_DATA)); return data; } /* * MIDI */ static void vt1724_midi_clear_rx(struct snd_ice1712 *ice) { unsigned int count; for (count = inb(ICEREG1724(ice, MPU_RXFIFO)); count > 0; --count) inb(ICEREG1724(ice, MPU_DATA)); } static inline struct snd_rawmidi_substream * get_rawmidi_substream(struct snd_ice1712 *ice, unsigned int stream) { return list_first_entry(&ice->rmidi[0]->streams[stream].substreams, struct snd_rawmidi_substream, list); } static void enable_midi_irq(struct snd_ice1712 *ice, u8 flag, int enable); static void vt1724_midi_write(struct snd_ice1712 *ice) { struct snd_rawmidi_substream *s; int count, i; u8 buffer[32]; s = get_rawmidi_substream(ice, 
SNDRV_RAWMIDI_STREAM_OUTPUT); count = 31 - inb(ICEREG1724(ice, MPU_TXFIFO)); if (count > 0) { count = snd_rawmidi_transmit(s, buffer, count); for (i = 0; i < count; ++i) outb(buffer[i], ICEREG1724(ice, MPU_DATA)); } /* mask irq when all bytes have been transmitted. * enabled again in output_trigger when the new data comes in. */ enable_midi_irq(ice, VT1724_IRQ_MPU_TX, !snd_rawmidi_transmit_empty(s)); } static void vt1724_midi_read(struct snd_ice1712 *ice) { struct snd_rawmidi_substream *s; int count, i; u8 buffer[32]; s = get_rawmidi_substream(ice, SNDRV_RAWMIDI_STREAM_INPUT); count = inb(ICEREG1724(ice, MPU_RXFIFO)); if (count > 0) { count = min(count, 32); for (i = 0; i < count; ++i) buffer[i] = inb(ICEREG1724(ice, MPU_DATA)); snd_rawmidi_receive(s, buffer, count); } } /* call with ice->reg_lock */ static void enable_midi_irq(struct snd_ice1712 *ice, u8 flag, int enable) { u8 mask = inb(ICEREG1724(ice, IRQMASK)); if (enable) mask &= ~flag; else mask |= flag; outb(mask, ICEREG1724(ice, IRQMASK)); } static void vt1724_enable_midi_irq(struct snd_rawmidi_substream *substream, u8 flag, int enable) { struct snd_ice1712 *ice = substream->rmidi->private_data; spin_lock_irq(&ice->reg_lock); enable_midi_irq(ice, flag, enable); spin_unlock_irq(&ice->reg_lock); } static int vt1724_midi_output_open(struct snd_rawmidi_substream *s) { return 0; } static int vt1724_midi_output_close(struct snd_rawmidi_substream *s) { return 0; } static void vt1724_midi_output_trigger(struct snd_rawmidi_substream *s, int up) { struct snd_ice1712 *ice = s->rmidi->private_data; unsigned long flags; spin_lock_irqsave(&ice->reg_lock, flags); if (up) { ice->midi_output = 1; vt1724_midi_write(ice); } else { ice->midi_output = 0; enable_midi_irq(ice, VT1724_IRQ_MPU_TX, 0); } spin_unlock_irqrestore(&ice->reg_lock, flags); } static void vt1724_midi_output_drain(struct snd_rawmidi_substream *s) { struct snd_ice1712 *ice = s->rmidi->private_data; unsigned long timeout; vt1724_enable_midi_irq(s, 
VT1724_IRQ_MPU_TX, 0);
	/* 32 bytes should be transmitted in less than about 12 ms */
	timeout = jiffies + msecs_to_jiffies(15);
	do {
		if (inb(ICEREG1724(ice, MPU_CTRL)) & VT1724_MPU_TX_EMPTY)
			break;
		schedule_timeout_uninterruptible(1);
	} while (time_after(timeout, jiffies));
}

static struct snd_rawmidi_ops vt1724_midi_output_ops = {
	.open = vt1724_midi_output_open,
	.close = vt1724_midi_output_close,
	.trigger = vt1724_midi_output_trigger,
	.drain = vt1724_midi_output_drain,
};

/* open MIDI input: drop any stale RX bytes, then enable the RX interrupt */
static int vt1724_midi_input_open(struct snd_rawmidi_substream *s)
{
	vt1724_midi_clear_rx(s->rmidi->private_data);
	vt1724_enable_midi_irq(s, VT1724_IRQ_MPU_RX, 1);
	return 0;
}

/* close MIDI input: disable the RX interrupt again */
static int vt1724_midi_input_close(struct snd_rawmidi_substream *s)
{
	vt1724_enable_midi_irq(s, VT1724_IRQ_MPU_RX, 0);
	return 0;
}

/*
 * Switch MIDI input on/off; when enabling, drain whatever is already
 * pending in the UART FIFO under the register lock.
 */
static void vt1724_midi_input_trigger(struct snd_rawmidi_substream *s, int up)
{
	struct snd_ice1712 *ice = s->rmidi->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ice->reg_lock, flags);
	if (up) {
		ice->midi_input = 1;
		vt1724_midi_read(ice);
	} else {
		ice->midi_input = 0;
	}
	spin_unlock_irqrestore(&ice->reg_lock, flags);
}

static struct snd_rawmidi_ops vt1724_midi_input_ops = {
	.open = vt1724_midi_input_open,
	.close = vt1724_midi_input_close,
	.trigger = vt1724_midi_input_trigger,
};

/*
 *  Interrupt handler
 */

/*
 * Top-level IRQ handler: loops while any masked status bit (MPU RX/TX,
 * multi-track PCM) is pending, with a loop counter as a safety valve
 * against a stuck interrupt line.
 */
static irqreturn_t snd_vt1724_interrupt(int irq, void *dev_id)
{
	struct snd_ice1712 *ice = dev_id;
	unsigned char status;
	unsigned char status_mask =
		VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX | VT1724_IRQ_MTPCM;
	int handled = 0;
	int timeout = 0;

	while (1) {
		status = inb(ICEREG1724(ice, IRQSTAT));
		status &= status_mask;
		if (status == 0)
			break;
		spin_lock(&ice->reg_lock);
		/* give up after 10 iterations to avoid an IRQ storm lockup */
		if (++timeout > 10) {
			status = inb(ICEREG1724(ice, IRQSTAT));
			printk(KERN_ERR "ice1724: Too long irq loop, "
			       "status = 0x%x\n", status);
			if (status & VT1724_IRQ_MPU_TX) {
				printk(KERN_ERR "ice1724: Disabling MPU_TX\n");
				enable_midi_irq(ice, VT1724_IRQ_MPU_TX, 0);
			}
			spin_unlock(&ice->reg_lock);
			break;
		}
		handled = 1;
		if (status & VT1724_IRQ_MPU_TX) {
			if (ice->midi_output)
				vt1724_midi_write(ice);
			else
				enable_midi_irq(ice, VT1724_IRQ_MPU_TX, 0);
			/* Due to mysterical reasons, MPU_TX is always
			 * generated (and can't be cleared) when a PCM
			 * playback is going.  So let's ignore at the
			 * next loop.
			 */
			status_mask &= ~VT1724_IRQ_MPU_TX;
		}
		if (status & VT1724_IRQ_MPU_RX) {
			if (ice->midi_input)
				vt1724_midi_read(ice);
			else
				vt1724_midi_clear_rx(ice);
		}
		/* ack MPU irq */
		outb(status, ICEREG1724(ice, IRQSTAT));
		spin_unlock(&ice->reg_lock);
		if (status & VT1724_IRQ_MTPCM) {
			/*
			 * Multi-track PCM
			 * PCM assignment are:
			 * Playback DMA0 (M/C) = playback_pro_substream
			 * Playback DMA1 = playback_con_substream_ds[0]
			 * Playback DMA2 = playback_con_substream_ds[1]
			 * Playback DMA3 = playback_con_substream_ds[2]
			 * Playback DMA4 (SPDIF) = playback_con_substream
			 * Record DMA0 = capture_pro_substream
			 * Record DMA1 = capture_con_substream
			 */
			unsigned char mtstat = inb(ICEMT1724(ice, IRQ));
			if (mtstat & VT1724_MULTI_PDMA0) {
				if (ice->playback_pro_substream)
					snd_pcm_period_elapsed(ice->playback_pro_substream);
			}
			if (mtstat & VT1724_MULTI_RDMA0) {
				if (ice->capture_pro_substream)
					snd_pcm_period_elapsed(ice->capture_pro_substream);
			}
			if (mtstat & VT1724_MULTI_PDMA1) {
				if (ice->playback_con_substream_ds[0])
					snd_pcm_period_elapsed(ice->playback_con_substream_ds[0]);
			}
			if (mtstat & VT1724_MULTI_PDMA2) {
				if (ice->playback_con_substream_ds[1])
					snd_pcm_period_elapsed(ice->playback_con_substream_ds[1]);
			}
			if (mtstat & VT1724_MULTI_PDMA3) {
				if (ice->playback_con_substream_ds[2])
					snd_pcm_period_elapsed(ice->playback_con_substream_ds[2]);
			}
			if (mtstat & VT1724_MULTI_PDMA4) {
				if (ice->playback_con_substream)
					snd_pcm_period_elapsed(ice->playback_con_substream);
			}
			if (mtstat & VT1724_MULTI_RDMA1) {
				if (ice->capture_con_substream)
					snd_pcm_period_elapsed(ice->capture_con_substream);
			}
			/* ack anyway to avoid freeze */
			outb(mtstat, ICEMT1724(ice, IRQ));
			/* ought to really handle this properly */
			if (mtstat & VT1724_MULTI_FIFO_ERR) {
				unsigned char fstat = inb(ICEMT1724(ice, DMA_FIFO_ERR));
				outb(fstat, ICEMT1724(ice, DMA_FIFO_ERR));
				outb(VT1724_MULTI_FIFO_ERR | inb(ICEMT1724(ice, DMA_INT_MASK)),
				     ICEMT1724(ice, DMA_INT_MASK));
				/* If I don't do this, I get machine lockup due to continual interrupts */
			}

		}
	}
	return IRQ_RETVAL(handled);
}

/*
 *  PCM code - professional part (multitrack)
 */

/* all sample rates supported by the chip; constraint lists below slice it */
static unsigned int rates[] = {
	8000, 9600, 11025, 12000, 16000, 22050, 24000,
	32000, 44100, 48000, 64000, 88200, 96000,
	176400, 192000,
};

static struct snd_pcm_hw_constraint_list hw_constraints_rates_96 = {
	.count = ARRAY_SIZE(rates) - 2, /* up to 96000 */
	.list = rates,
	.mask = 0,
};

static struct snd_pcm_hw_constraint_list hw_constraints_rates_48 = {
	.count = ARRAY_SIZE(rates) - 5, /* up to 48000 */
	.list = rates,
	.mask = 0,
};

static struct snd_pcm_hw_constraint_list hw_constraints_rates_192 = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
	.mask = 0,
};

/* per-DMA-channel register offsets and start bit, kept in runtime->private_data */
struct vt1724_pcm_reg {
	unsigned int addr;	/* ADDR register offset */
	unsigned int size;	/* SIZE register offset */
	unsigned int count;	/* COUNT register offset */
	unsigned int start;	/* start & pause bit */
};

/*
 * Start/stop/pause trigger: collects the start bits of every linked
 * substream on this chip, then flips them in one register write.
 */
static int snd_vt1724_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	unsigned char what;
	unsigned char old;
	struct snd_pcm_substream *s;

	what = 0;
	snd_pcm_group_for_each_entry(s, substream) {
		if (snd_pcm_substream_chip(s) == ice) {
			const struct vt1724_pcm_reg *reg;
			reg = s->runtime->private_data;
			what |= reg->start;
			snd_pcm_trigger_done(s, substream);
		}
	}

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		spin_lock(&ice->reg_lock);
		old = inb(ICEMT1724(ice, DMA_PAUSE));
		if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH)
			old |= what;
		else
			old &= ~what;
		outb(old, ICEMT1724(ice, DMA_PAUSE));
		spin_unlock(&ice->reg_lock);
		break;

	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		spin_lock(&ice->reg_lock);
		old = inb(ICEMT1724(ice,
DMA_CONTROL));
		if (cmd == SNDRV_PCM_TRIGGER_START)
			old |= what;
		else
			old &= ~what;
		outb(old, ICEMT1724(ice, DMA_CONTROL));
		spin_unlock(&ice->reg_lock);
		break;

	case SNDRV_PCM_TRIGGER_RESUME:
		/* apps will have to restart stream */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 */

/* union of all DMA start / pause bits, used to detect any running stream */
#define DMA_STARTS	(VT1724_RDMA0_START|VT1724_PDMA0_START|VT1724_RDMA1_START|\
	VT1724_PDMA1_START|VT1724_PDMA2_START|VT1724_PDMA3_START|VT1724_PDMA4_START)
#define DMA_PAUSES	(VT1724_RDMA0_PAUSE|VT1724_PDMA0_PAUSE|VT1724_RDMA1_PAUSE|\
	VT1724_PDMA1_PAUSE|VT1724_PDMA2_PAUSE|VT1724_PDMA3_PAUSE|VT1724_PDMA4_PAUSE)

/* rate for each 4-bit value of the MT RATE register; 0 = reserved encoding */
static const unsigned int stdclock_rate_list[16] = {
	48000, 24000, 12000, 9600, 32000, 16000, 8000, 96000, 44100,
	22050, 11025, 88200, 176400, 0, 192000, 64000
};

/* read the current internal-clock sample rate from the RATE register */
static unsigned int stdclock_get_rate(struct snd_ice1712 *ice)
{
	unsigned int rate;
	rate = stdclock_rate_list[inb(ICEMT1724(ice, RATE)) & 15];
	return rate;
}

/* program the RATE register; silently ignores unsupported rates */
static void stdclock_set_rate(struct snd_ice1712 *ice, unsigned int rate)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(stdclock_rate_list); i++) {
		if (stdclock_rate_list[i] == rate) {
			outb(i, ICEMT1724(ice, RATE));
			return;
		}
	}
}

/*
 * Select 128x vs 256x master clock for I2S mode depending on rate.
 * Returns 1 if the master clock actually changed, 0 otherwise.
 */
static unsigned char stdclock_set_mclk(struct snd_ice1712 *ice,
				       unsigned int rate)
{
	unsigned char val, old;
	/* check MT02 */
	if (ice->eeprom.data[ICE_EEP2_ACLINK] & VT1724_CFG_PRO_I2S) {
		val = old = inb(ICEMT1724(ice, I2S_FORMAT));
		if (rate > 96000)
			val |= VT1724_MT_I2S_MCLK_128X; /* 128x MCLK */
		else
			val &= ~VT1724_MT_I2S_MCLK_128X; /* 256x MCLK */
		if (val != old) {
			outb(val, ICEMT1724(ice, I2S_FORMAT));
			/* master clock changed */
			return 1;
		}
	}
	/* no change in master clock */
	return 0;
}

/*
 * Set the professional sample rate.  Fails with -EBUSY if any DMA
 * stream is running (or paused) at a different rate, or if the rate is
 * locked externally; on success it also updates master clock, GPIO
 * hooks, AK codecs and the SPDIF transmitter.
 * @force: non-zero when changed explicitly via a control element.
 */
static int snd_vt1724_set_pro_rate(struct snd_ice1712 *ice, unsigned int rate,
				   int force)
{
	unsigned long flags;
	unsigned char mclk_change;
	unsigned int i, old_rate;

	if (rate > ice->hw_rates->list[ice->hw_rates->count - 1])
		return -EINVAL;
	spin_lock_irqsave(&ice->reg_lock, flags);
	if ((inb(ICEMT1724(ice, DMA_CONTROL)) & DMA_STARTS) ||
	    (inb(ICEMT1724(ice, DMA_PAUSE)) & DMA_PAUSES)) {
		/* running? we cannot change the rate now... */
		spin_unlock_irqrestore(&ice->reg_lock, flags);
		return ((rate == ice->cur_rate) && !force) ? 0 : -EBUSY;
	}
	if (!force && is_pro_rate_locked(ice)) {
		/* comparing required and current rate - makes sense for
		 * internal clock only */
		spin_unlock_irqrestore(&ice->reg_lock, flags);
		return (rate == ice->cur_rate) ? 0 : -EBUSY;
	}

	if (force || !ice->is_spdif_master(ice)) {
		/* force means the rate was switched by ucontrol, otherwise
		 * setting clock rate for internal clock mode */
		old_rate = ice->get_rate(ice);
		if (force || (old_rate != rate))
			ice->set_rate(ice, rate);
		else if (rate == ice->cur_rate) {
			spin_unlock_irqrestore(&ice->reg_lock, flags);
			return 0;
		}
	}

	ice->cur_rate = rate;

	/* setting master clock */
	mclk_change = ice->set_mclk(ice, rate);

	spin_unlock_irqrestore(&ice->reg_lock, flags);

	if (mclk_change && ice->gpio.i2s_mclk_changed)
		ice->gpio.i2s_mclk_changed(ice);
	if (ice->gpio.set_pro_rate)
		ice->gpio.set_pro_rate(ice, rate);

	/* set up codecs */
	for (i = 0; i < ice->akm_codecs; i++) {
		if (ice->akm[i].ops.set_rate_val)
			ice->akm[i].ops.set_rate_val(&ice->akm[i], rate);
	}
	if (ice->spdif.ops.setup_rate)
		ice->spdif.ops.setup_rate(ice, rate);

	return 0;
}

/*
 * hw_params: reserve the surround DMA channels needed by this stream
 * (PDMA0 multi-channel playback claims PDMA1-3 slots), set the rate and
 * allocate the DMA buffer.
 */
static int snd_vt1724_pcm_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *hw_params)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	int i, chs, err;

	chs = params_channels(hw_params);
	mutex_lock(&ice->open_mutex);
	/* mark surround channels */
	if (substream == ice->playback_pro_substream) {
		/* PDMA0 can be multi-channel up to 8 */
		chs = chs / 2 - 1;
		for (i = 0; i < chs; i++) {
			if (ice->pcm_reserved[i] &&
			    ice->pcm_reserved[i] != substream) {
				mutex_unlock(&ice->open_mutex);
				return -EBUSY;
			}
			ice->pcm_reserved[i] = substream;
		}
		for (; i < 3; i++) {
			if (ice->pcm_reserved[i] == substream)
				ice->pcm_reserved[i] = NULL;
		}
	} else {
		for (i = 0; i < 3; i++) {
			/* check individual playback stream */
			if (ice->playback_con_substream_ds[i] == substream) {
				if (ice->pcm_reserved[i] &&
				    ice->pcm_reserved[i] != substream) {
					mutex_unlock(&ice->open_mutex);
					return -EBUSY;
				}
				ice->pcm_reserved[i] = substream;
				break;
			}
		}
	}
	mutex_unlock(&ice->open_mutex);
	err = snd_vt1724_set_pro_rate(ice, params_rate(hw_params), 0);
	if (err < 0)
		return err;
	return snd_pcm_lib_malloc_pages(substream,
					params_buffer_bytes(hw_params));
}

/* hw_free: release any surround slots held by this substream and the buffer */
static int snd_vt1724_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	int i;

	mutex_lock(&ice->open_mutex);
	/* unmark surround channels */
	for (i = 0; i < 3; i++)
		if (ice->pcm_reserved[i] == substream)
			ice->pcm_reserved[i] = NULL;
	mutex_unlock(&ice->open_mutex);
	return snd_pcm_lib_free_pages(substream);
}

/*
 * prepare for PDMA0 (multi-channel pro playback): program burst size,
 * DMA base address, and the 24-bit buffer/period sizes (in dwords - 1,
 * split into a 16-bit word plus the high byte).
 */
static int snd_vt1724_playback_pro_prepare(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	unsigned char val;
	unsigned int size;

	spin_lock_irq(&ice->reg_lock);
	val = (8 - substream->runtime->channels) >> 1;
	outb(val, ICEMT1724(ice, BURST));

	outl(substream->runtime->dma_addr, ICEMT1724(ice, PLAYBACK_ADDR));

	size = (snd_pcm_lib_buffer_bytes(substream) >> 2) - 1;
	/* outl(size, ICEMT1724(ice, PLAYBACK_SIZE)); */
	outw(size, ICEMT1724(ice, PLAYBACK_SIZE));
	outb(size >> 16, ICEMT1724(ice, PLAYBACK_SIZE) + 2);
	size = (snd_pcm_lib_period_bytes(substream) >> 2) - 1;
	/* outl(size, ICEMT1724(ice, PLAYBACK_COUNT)); */
	outw(size, ICEMT1724(ice, PLAYBACK_COUNT));
	outb(size >> 16, ICEMT1724(ice, PLAYBACK_COUNT) + 2);

	spin_unlock_irq(&ice->reg_lock);

	/*
	printk(KERN_DEBUG "pro prepare: ch = %d, addr = 0x%x, "
	       "buffer = 0x%x, period = 0x%x\n",
	       substream->runtime->channels,
	       (unsigned int)substream->runtime->dma_addr,
	       snd_pcm_lib_buffer_bytes(substream),
	       snd_pcm_lib_period_bytes(substream));
	*/
	return 0;
}

/* current playback position of PDMA0, derived from the SIZE countdown */
static snd_pcm_uframes_t snd_vt1724_playback_pro_pointer(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	size_t ptr;

	if (!(inl(ICEMT1724(ice,
DMA_CONTROL)) & VT1724_PDMA0_START))
		return 0;
#if 0 /* read PLAYBACK_ADDR */
	ptr = inl(ICEMT1724(ice, PLAYBACK_ADDR));
	if (ptr < substream->runtime->dma_addr) {
		snd_printd("ice1724: invalid negative ptr\n");
		return 0;
	}
	ptr -= substream->runtime->dma_addr;
	ptr = bytes_to_frames(substream->runtime, ptr);
	if (ptr >= substream->runtime->buffer_size) {
		snd_printd("ice1724: invalid ptr %d (size=%d)\n",
			   (int)ptr, (int)substream->runtime->period_size);
		return 0;
	}
#else /* read PLAYBACK_SIZE */
	/* SIZE counts down remaining dwords - 1; convert to frames played */
	ptr = inl(ICEMT1724(ice, PLAYBACK_SIZE)) & 0xffffff;
	ptr = (ptr + 1) << 2;
	ptr = bytes_to_frames(substream->runtime, ptr);
	if (!ptr)
		;
	else if (ptr <= substream->runtime->buffer_size)
		ptr = substream->runtime->buffer_size - ptr;
	else {
		snd_printd("ice1724: invalid ptr %d (size=%d)\n",
			   (int)ptr, (int)substream->runtime->buffer_size);
		ptr = 0;
	}
#endif
	return ptr;
}

/* generic prepare for the single-pair DMA channels described by vt1724_pcm_reg */
static int snd_vt1724_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	const struct vt1724_pcm_reg *reg = substream->runtime->private_data;

	spin_lock_irq(&ice->reg_lock);
	outl(substream->runtime->dma_addr, ice->profi_port + reg->addr);
	outw((snd_pcm_lib_buffer_bytes(substream) >> 2) - 1,
	     ice->profi_port + reg->size);
	outw((snd_pcm_lib_period_bytes(substream) >> 2) - 1,
	     ice->profi_port + reg->count);
	spin_unlock_irq(&ice->reg_lock);
	return 0;
}

/* generic pointer callback based on the per-channel SIZE countdown register */
static snd_pcm_uframes_t snd_vt1724_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	const struct vt1724_pcm_reg *reg = substream->runtime->private_data;
	size_t ptr;

	if (!(inl(ICEMT1724(ice, DMA_CONTROL)) & reg->start))
		return 0;
#if 0 /* use ADDR register */
	ptr = inl(ice->profi_port + reg->addr);
	ptr -= substream->runtime->dma_addr;
	return bytes_to_frames(substream->runtime, ptr);
#else /* use SIZE register */
	ptr = inw(ice->profi_port + reg->size);
	ptr = (ptr + 1) << 2;
	ptr = bytes_to_frames(substream->runtime, ptr);
	if (!ptr)
		;
	else if (ptr <= substream->runtime->buffer_size)
		ptr = substream->runtime->buffer_size - ptr;
	else {
		snd_printd("ice1724: invalid ptr %d (size=%d)\n",
			   (int)ptr, (int)substream->runtime->buffer_size);
		ptr = 0;
	}
	return ptr;
#endif
}

/* register sets for each DMA channel */
static const struct vt1724_pcm_reg vt1724_pdma0_reg = {
	.addr = VT1724_MT_PLAYBACK_ADDR,
	.size = VT1724_MT_PLAYBACK_SIZE,
	.count = VT1724_MT_PLAYBACK_COUNT,
	.start = VT1724_PDMA0_START,
};

static const struct vt1724_pcm_reg vt1724_pdma4_reg = {
	.addr = VT1724_MT_PDMA4_ADDR,
	.size = VT1724_MT_PDMA4_SIZE,
	.count = VT1724_MT_PDMA4_COUNT,
	.start = VT1724_PDMA4_START,
};

static const struct vt1724_pcm_reg vt1724_rdma0_reg = {
	.addr = VT1724_MT_CAPTURE_ADDR,
	.size = VT1724_MT_CAPTURE_SIZE,
	.count = VT1724_MT_CAPTURE_COUNT,
	.start = VT1724_RDMA0_START,
};

static const struct vt1724_pcm_reg vt1724_rdma1_reg = {
	.addr = VT1724_MT_RDMA1_ADDR,
	.size = VT1724_MT_RDMA1_SIZE,
	.count = VT1724_MT_RDMA1_COUNT,
	.start = VT1724_RDMA1_START,
};

/* logical names for the physical DMA channels */
#define vt1724_playback_pro_reg		vt1724_pdma0_reg
#define vt1724_playback_spdif_reg	vt1724_pdma4_reg
#define vt1724_capture_pro_reg		vt1724_rdma0_reg
#define vt1724_capture_spdif_reg	vt1724_rdma1_reg

static const struct snd_pcm_hardware snd_vt1724_playback_pro = {
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START),
	.formats =		SNDRV_PCM_FMTBIT_S32_LE,
	.rates =		SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_192000,
	.rate_min =		8000,
	.rate_max =		192000,
	.channels_min =		2,
	.channels_max =		8,
	.buffer_bytes_max =	(1UL << 21),	/* 19bits dword */
	.period_bytes_min =	8 * 4 * 2,	/* FIXME: constraints needed */
	.period_bytes_max =	(1UL << 21),
	.periods_min =		2,
	.periods_max =		1024,
};

static const struct snd_pcm_hardware snd_vt1724_spdif = {
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START),
	.formats =		SNDRV_PCM_FMTBIT_S32_LE,
	.rates =		(SNDRV_PCM_RATE_32000|SNDRV_PCM_RATE_44100|
				 SNDRV_PCM_RATE_48000|SNDRV_PCM_RATE_88200|
				 SNDRV_PCM_RATE_96000|SNDRV_PCM_RATE_176400|
				 SNDRV_PCM_RATE_192000),
	.rate_min =		32000,
	.rate_max =		192000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(1UL << 18),	/* 16bits dword */
	.period_bytes_min =	2 * 4 * 2,
	.period_bytes_max =	(1UL << 18),
	.periods_min =		2,
	.periods_max =		1024,
};

static const struct snd_pcm_hardware snd_vt1724_2ch_stereo = {
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START),
	.formats =		SNDRV_PCM_FMTBIT_S32_LE,
	.rates =		SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_8000_192000,
	.rate_min =		8000,
	.rate_max =		192000,
	.channels_min =		2,
	.channels_max =		2,
	.buffer_bytes_max =	(1UL << 18),	/* 16bits dword */
	.period_bytes_min =	2 * 4 * 2,
	.period_bytes_max =	(1UL << 18),
	.periods_min =		2,
	.periods_max =		1024,
};

/*
 * set rate constraints
 */
static void set_std_hw_rates(struct snd_ice1712 *ice)
{
	if (ice->eeprom.data[ICE_EEP2_ACLINK] & VT1724_CFG_PRO_I2S) {
		/* I2S */
		/* VT1720 doesn't support more than 96kHz */
		if ((ice->eeprom.data[ICE_EEP2_I2S] & 0x08) && !ice->vt1720)
			ice->hw_rates = &hw_constraints_rates_192;
		else
			ice->hw_rates = &hw_constraints_rates_96;
	} else {
		/* ACLINK */
		ice->hw_rates = &hw_constraints_rates_48;
	}
}

/* apply the board's rate list (chosen by set_std_hw_rates) to a runtime */
static int set_rate_constraints(struct snd_ice1712 *ice,
				struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->hw.rate_min = ice->hw_rates->list[0];
	runtime->hw.rate_max = ice->hw_rates->list[ice->hw_rates->count - 1];
	runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
	return snd_pcm_hw_constraint_list(runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  ice->hw_rates);
}

/* multi-channel playback needs alignment 8x32bit regardless of the channels
 * actually used
 */
#define VT1724_BUFFER_ALIGN	0x20

static int snd_vt1724_playback_pro_open(struct snd_pcm_substream
*substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ice1712 *ice = snd_pcm_substream_chip(substream); int chs, num_indeps; runtime->private_data = (void *)&vt1724_playback_pro_reg; ice->playback_pro_substream = substream; runtime->hw = snd_vt1724_playback_pro; snd_pcm_set_sync(substream); snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); set_rate_constraints(ice, substream); mutex_lock(&ice->open_mutex); /* calculate the currently available channels */ num_indeps = ice->num_total_dacs / 2 - 1; for (chs = 0; chs < num_indeps; chs++) { if (ice->pcm_reserved[chs]) break; } chs = (chs + 1) * 2; runtime->hw.channels_max = chs; if (chs > 2) /* channels must be even */ snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, 2); mutex_unlock(&ice->open_mutex); snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, VT1724_BUFFER_ALIGN); snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, VT1724_BUFFER_ALIGN); if (ice->pro_open) ice->pro_open(ice, substream); return 0; } static int snd_vt1724_capture_pro_open(struct snd_pcm_substream *substream) { struct snd_ice1712 *ice = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; runtime->private_data = (void *)&vt1724_capture_pro_reg; ice->capture_pro_substream = substream; runtime->hw = snd_vt1724_2ch_stereo; snd_pcm_set_sync(substream); snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); set_rate_constraints(ice, substream); snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, VT1724_BUFFER_ALIGN); snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, VT1724_BUFFER_ALIGN); if (ice->pro_open) ice->pro_open(ice, substream); return 0; } static int snd_vt1724_playback_pro_close(struct snd_pcm_substream *substream) { struct snd_ice1712 *ice = snd_pcm_substream_chip(substream); if (PRO_RATE_RESET) snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0); ice->playback_pro_substream = NULL; 
return 0; } static int snd_vt1724_capture_pro_close(struct snd_pcm_substream *substream) { struct snd_ice1712 *ice = snd_pcm_substream_chip(substream); if (PRO_RATE_RESET) snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0); ice->capture_pro_substream = NULL; return 0; } static struct snd_pcm_ops snd_vt1724_playback_pro_ops = { .open = snd_vt1724_playback_pro_open, .close = snd_vt1724_playback_pro_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_vt1724_pcm_hw_params, .hw_free = snd_vt1724_pcm_hw_free, .prepare = snd_vt1724_playback_pro_prepare, .trigger = snd_vt1724_pcm_trigger, .pointer = snd_vt1724_playback_pro_pointer, }; static struct snd_pcm_ops snd_vt1724_capture_pro_ops = { .open = snd_vt1724_capture_pro_open, .close = snd_vt1724_capture_pro_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_vt1724_pcm_hw_params, .hw_free = snd_vt1724_pcm_hw_free, .prepare = snd_vt1724_pcm_prepare, .trigger = snd_vt1724_pcm_trigger, .pointer = snd_vt1724_pcm_pointer, }; static int __devinit snd_vt1724_pcm_profi(struct snd_ice1712 *ice, int device) { struct snd_pcm *pcm; int err; err = snd_pcm_new(ice->card, "ICE1724", device, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_vt1724_playback_pro_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_vt1724_capture_pro_ops); pcm->private_data = ice; pcm->info_flags = 0; strcpy(pcm->name, "ICE1724"); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(ice->pci), 256*1024, 256*1024); ice->pcm_pro = pcm; return 0; } /* * SPDIF PCM */ /* update spdif control bits; call with reg_lock */ static void update_spdif_bits(struct snd_ice1712 *ice, unsigned int val) { unsigned char cbit, disabled; cbit = inb(ICEREG1724(ice, SPDIF_CFG)); disabled = cbit & ~VT1724_CFG_SPDIF_OUT_EN; if (cbit != disabled) outb(disabled, ICEREG1724(ice, SPDIF_CFG)); outw(val, ICEMT1724(ice, SPDIF_CTRL)); if (cbit != disabled) outb(cbit, ICEREG1724(ice, SPDIF_CFG)); outw(val, 
ICEMT1724(ice, SPDIF_CTRL)); } /* update SPDIF control bits according to the given rate */ static void update_spdif_rate(struct snd_ice1712 *ice, unsigned int rate) { unsigned int val, nval; unsigned long flags; spin_lock_irqsave(&ice->reg_lock, flags); nval = val = inw(ICEMT1724(ice, SPDIF_CTRL)); nval &= ~(7 << 12); switch (rate) { case 44100: break; case 48000: nval |= 2 << 12; break; case 32000: nval |= 3 << 12; break; case 88200: nval |= 4 << 12; break; case 96000: nval |= 5 << 12; break; case 192000: nval |= 6 << 12; break; case 176400: nval |= 7 << 12; break; } if (val != nval) update_spdif_bits(ice, nval); spin_unlock_irqrestore(&ice->reg_lock, flags); } static int snd_vt1724_playback_spdif_prepare(struct snd_pcm_substream *substream) { struct snd_ice1712 *ice = snd_pcm_substream_chip(substream); if (!ice->force_pdma4) update_spdif_rate(ice, substream->runtime->rate); return snd_vt1724_pcm_prepare(substream); } static int snd_vt1724_playback_spdif_open(struct snd_pcm_substream *substream) { struct snd_ice1712 *ice = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; runtime->private_data = (void *)&vt1724_playback_spdif_reg; ice->playback_con_substream = substream; if (ice->force_pdma4) { runtime->hw = snd_vt1724_2ch_stereo; set_rate_constraints(ice, substream); } else runtime->hw = snd_vt1724_spdif; snd_pcm_set_sync(substream); snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24); snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, VT1724_BUFFER_ALIGN); snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, VT1724_BUFFER_ALIGN); if (ice->spdif.ops.open) ice->spdif.ops.open(ice, substream); return 0; } static int snd_vt1724_playback_spdif_close(struct snd_pcm_substream *substream) { struct snd_ice1712 *ice = snd_pcm_substream_chip(substream); if (PRO_RATE_RESET) snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0); ice->playback_con_substream = NULL; if (ice->spdif.ops.close) 
ice->spdif.ops.close(ice, substream);

	return 0;
}

static int snd_vt1724_capture_spdif_open(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->private_data = (void *)&vt1724_capture_spdif_reg;
	ice->capture_con_substream = substream;
	if (ice->force_rdma1) {
		runtime->hw = snd_vt1724_2ch_stereo;
		set_rate_constraints(ice, substream);
	} else
		runtime->hw = snd_vt1724_spdif;
	snd_pcm_set_sync(substream);
	snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   VT1724_BUFFER_ALIGN);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
				   VT1724_BUFFER_ALIGN);
	if (ice->spdif.ops.open)
		ice->spdif.ops.open(ice, substream);
	return 0;
}

static int snd_vt1724_capture_spdif_close(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);

	if (PRO_RATE_RESET)
		snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0);
	ice->capture_con_substream = NULL;
	if (ice->spdif.ops.close)
		ice->spdif.ops.close(ice, substream);

	return 0;
}

static struct snd_pcm_ops snd_vt1724_playback_spdif_ops = {
	.open =		snd_vt1724_playback_spdif_open,
	.close =	snd_vt1724_playback_spdif_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_vt1724_pcm_hw_params,
	.hw_free =	snd_vt1724_pcm_hw_free,
	.prepare =	snd_vt1724_playback_spdif_prepare,
	.trigger =	snd_vt1724_pcm_trigger,
	.pointer =	snd_vt1724_pcm_pointer,
};

static struct snd_pcm_ops snd_vt1724_capture_spdif_ops = {
	.open =		snd_vt1724_capture_spdif_open,
	.close =	snd_vt1724_capture_spdif_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_vt1724_pcm_hw_params,
	.hw_free =	snd_vt1724_pcm_hw_free,
	.prepare =	snd_vt1724_pcm_prepare,
	.trigger =	snd_vt1724_pcm_trigger,
	.pointer =	snd_vt1724_pcm_pointer,
};

/*
 * Create the SPDIF PCM device.  Playback/capture substreams exist only
 * when the EEPROM reports internal SPDIF out/in, or when the board
 * forces PDMA4/RDMA1 for a secondary analog pair.
 */
static int __devinit snd_vt1724_pcm_spdif(struct snd_ice1712 *ice, int device)
{
	char *name;
	struct snd_pcm *pcm;
	int play, capt;
	int err;

	if (ice->force_pdma4 ||
	    (ice->eeprom.data[ICE_EEP2_SPDIF] & VT1724_CFG_SPDIF_OUT_INT)) {
		play = 1;
		ice->has_spdif = 1;
	} else
		play = 0;
	if (ice->force_rdma1 ||
	    (ice->eeprom.data[ICE_EEP2_SPDIF] & VT1724_CFG_SPDIF_IN)) {
		capt = 1;
		ice->has_spdif = 1;
	} else
		capt = 0;
	if (!play && !capt)
		return 0; /* no spdif device */

	if (ice->force_pdma4 || ice->force_rdma1)
		name = "ICE1724 Secondary";
	else
		name = "ICE1724 IEC958";
	err = snd_pcm_new(ice->card, name, device, play, capt, &pcm);
	if (err < 0)
		return err;

	if (play)
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
				&snd_vt1724_playback_spdif_ops);
	if (capt)
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
				&snd_vt1724_capture_spdif_ops);

	pcm->private_data = ice;
	pcm->info_flags = 0;
	strcpy(pcm->name, name);

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(ice->pci),
					      256*1024, 256*1024);

	ice->pcm = pcm;

	return 0;
}

/*
 * independent surround PCMs
 */

static const struct vt1724_pcm_reg vt1724_playback_dma_regs[3] = {
	{
		.addr = VT1724_MT_PDMA1_ADDR,
		.size = VT1724_MT_PDMA1_SIZE,
		.count = VT1724_MT_PDMA1_COUNT,
		.start = VT1724_PDMA1_START,
	},
	{
		.addr = VT1724_MT_PDMA2_ADDR,
		.size = VT1724_MT_PDMA2_SIZE,
		.count = VT1724_MT_PDMA2_COUNT,
		.start = VT1724_PDMA2_START,
	},
	{
		.addr = VT1724_MT_PDMA3_ADDR,
		.size = VT1724_MT_PDMA3_SIZE,
		.count = VT1724_MT_PDMA3_COUNT,
		.start = VT1724_PDMA3_START,
	},
};

/* prepare for independent surround: ensure burst size covers this channel */
static int snd_vt1724_playback_indep_prepare(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	unsigned char val;

	spin_lock_irq(&ice->reg_lock);
	val = 3 - substream->number;
	if (inb(ICEMT1724(ice, BURST)) < val)
		outb(val, ICEMT1724(ice, BURST));
	spin_unlock_irq(&ice->reg_lock);
	return snd_vt1724_pcm_prepare(substream);
}

static int snd_vt1724_playback_indep_open(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	mutex_lock(&ice->open_mutex);
	/* already used by PDMA0? */
	if (ice->pcm_reserved[substream->number]) {
		mutex_unlock(&ice->open_mutex);
		return -EBUSY; /* FIXME: should handle blocking mode properly */
	}
	mutex_unlock(&ice->open_mutex);
	runtime->private_data =
		(void *)&vt1724_playback_dma_regs[substream->number];
	ice->playback_con_substream_ds[substream->number] = substream;
	runtime->hw = snd_vt1724_2ch_stereo;
	snd_pcm_set_sync(substream);
	snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
	set_rate_constraints(ice, substream);
	return 0;
}

static int snd_vt1724_playback_indep_close(struct snd_pcm_substream *substream)
{
	struct snd_ice1712 *ice = snd_pcm_substream_chip(substream);

	if (PRO_RATE_RESET)
		snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 0);
	ice->playback_con_substream_ds[substream->number] = NULL;
	ice->pcm_reserved[substream->number] = NULL;

	return 0;
}

static struct snd_pcm_ops snd_vt1724_playback_indep_ops = {
	.open =		snd_vt1724_playback_indep_open,
	.close =	snd_vt1724_playback_indep_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_vt1724_pcm_hw_params,
	.hw_free =	snd_vt1724_pcm_hw_free,
	.prepare =	snd_vt1724_playback_indep_prepare,
	.trigger =	snd_vt1724_pcm_trigger,
	.pointer =	snd_vt1724_pcm_pointer,
};

/* create the independent surround PCM device, one substream per DAC pair */
static int __devinit snd_vt1724_pcm_indep(struct snd_ice1712 *ice, int device)
{
	struct snd_pcm *pcm;
	int play;
	int err;

	play = ice->num_total_dacs / 2 - 1;
	if (play <= 0)
		return 0;

	err = snd_pcm_new(ice->card, "ICE1724 Surrounds", device, play, 0, &pcm);
	if (err < 0)
		return err;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
			&snd_vt1724_playback_indep_ops);

	pcm->private_data = ice;
	pcm->info_flags = 0;
	strcpy(pcm->name, "ICE1724 Surround PCM");

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(ice->pci),
					      256*1024, 256*1024);

	ice->pcm_ds = pcm;

	return 0;
}

/*
 *  Mixer section
 */

static int __devinit snd_vt1724_ac97_mixer(struct snd_ice1712 *ice)
{
	int err;

	if (!(ice->eeprom.data[ICE_EEP2_ACLINK] & VT1724_CFG_PRO_I2S)) {
		struct snd_ac97_bus *pbus;
		struct
snd_ac97_template ac97;
		static struct snd_ac97_bus_ops ops = {
			.write = snd_vt1724_ac97_write,
			.read = snd_vt1724_ac97_read,
		};

		/* cold reset */
		outb(inb(ICEMT1724(ice, AC97_CMD)) | 0x80, ICEMT1724(ice, AC97_CMD));
		mdelay(5); /* FIXME */
		outb(inb(ICEMT1724(ice, AC97_CMD)) & ~0x80, ICEMT1724(ice, AC97_CMD));

		err = snd_ac97_bus(ice->card, 0, &ops, NULL, &pbus);
		if (err < 0)
			return err;
		memset(&ac97, 0, sizeof(ac97));
		ac97.private_data = ice;
		err = snd_ac97_mixer(pbus, &ac97, &ice->ac97);
		if (err < 0)
			printk(KERN_WARNING
			       "ice1712: cannot initialize pro ac97, skipped\n");
		else
			return 0;
	}
	/* I2S mixer only */
	strcat(ice->card->mixername, "ICE1724 - multitrack");
	return 0;
}

/*
 *
 */

/* assemble a 24-bit little-endian value from three EEPROM bytes */
static inline unsigned int eeprom_triple(struct snd_ice1712 *ice, int idx)
{
	return (unsigned int)ice->eeprom.data[idx] | \
		((unsigned int)ice->eeprom.data[idx + 1] << 8) | \
		((unsigned int)ice->eeprom.data[idx + 2] << 16);
}

/* /proc reader: dump the EEPROM image and the CCS/MT register banks */
static void snd_vt1724_proc_read(struct snd_info_entry *entry,
				 struct snd_info_buffer *buffer)
{
	struct snd_ice1712 *ice = entry->private_data;
	unsigned int idx;

	snd_iprintf(buffer, "%s\n\n", ice->card->longname);
	snd_iprintf(buffer, "EEPROM:\n");

	snd_iprintf(buffer, "  Subvendor        : 0x%x\n", ice->eeprom.subvendor);
	snd_iprintf(buffer, "  Size             : %i bytes\n", ice->eeprom.size);
	snd_iprintf(buffer, "  Version          : %i\n", ice->eeprom.version);
	snd_iprintf(buffer, "  System Config    : 0x%x\n",
		    ice->eeprom.data[ICE_EEP2_SYSCONF]);
	snd_iprintf(buffer, "  ACLink           : 0x%x\n",
		    ice->eeprom.data[ICE_EEP2_ACLINK]);
	snd_iprintf(buffer, "  I2S              : 0x%x\n",
		    ice->eeprom.data[ICE_EEP2_I2S]);
	snd_iprintf(buffer, "  S/PDIF           : 0x%x\n",
		    ice->eeprom.data[ICE_EEP2_SPDIF]);
	snd_iprintf(buffer, "  GPIO direction   : 0x%x\n", ice->eeprom.gpiodir);
	snd_iprintf(buffer, "  GPIO mask        : 0x%x\n", ice->eeprom.gpiomask);
	snd_iprintf(buffer, "  GPIO state       : 0x%x\n", ice->eeprom.gpiostate);
	for (idx = 0x12; idx < ice->eeprom.size; idx++)
		snd_iprintf(buffer, "  Extra #%02i        : 0x%x\n",
			    idx, ice->eeprom.data[idx]);

	snd_iprintf(buffer, "\nRegisters:\n");

	snd_iprintf(buffer, "  PSDOUT03         : 0x%08x\n",
		    (unsigned)inl(ICEMT1724(ice, ROUTE_PLAYBACK)));
	for (idx = 0x0; idx < 0x20 ; idx++)
		snd_iprintf(buffer, "  CCS%02x            : 0x%02x\n",
			    idx, inb(ice->port+idx));
	for (idx = 0x0; idx < 0x30 ; idx++)
		snd_iprintf(buffer, "  MT%02x             : 0x%02x\n",
			    idx, inb(ice->profi_port+idx));
}

static void __devinit snd_vt1724_proc_init(struct snd_ice1712 *ice)
{
	struct snd_info_entry *entry;

	if (!snd_card_proc_new(ice->card, "ice1724", &entry))
		snd_info_set_text_ops(entry, ice, snd_vt1724_proc_read);
}

/*
 *
 */

static int snd_vt1724_eeprom_info(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
	uinfo->count = sizeof(struct snd_ice1712_eeprom);
	return 0;
}

/* expose the raw EEPROM image as a read-only bytes control */
static int snd_vt1724_eeprom_get(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	memcpy(ucontrol->value.bytes.data, &ice->eeprom, sizeof(ice->eeprom));
	return 0;
}

static struct snd_kcontrol_new snd_vt1724_eeprom __devinitdata = {
	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
	.name = "ICE1724 EEPROM",
	.access = SNDRV_CTL_ELEM_ACCESS_READ,
	.info = snd_vt1724_eeprom_info,
	.get = snd_vt1724_eeprom_get
};

/*
 */
static int snd_vt1724_spdif_info(struct snd_kcontrol *kcontrol,
				 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* pack IEC958 channel-status bytes into the 16-bit SPDIF_CTRL word */
static unsigned int encode_spdif_bits(struct snd_aes_iec958 *diga)
{
	unsigned int val, rbits;

	val = diga->status[0] & 0x03; /* professional, non-audio */
	if (val & 0x01) {
		/* professional */
		if ((diga->status[0] & IEC958_AES0_PRO_EMPHASIS) ==
		    IEC958_AES0_PRO_EMPHASIS_5015)
			val |= 1U << 3;
		rbits = (diga->status[4] >> 3) & 0x0f;
		if (rbits) {
			switch (rbits) {
			case 2: val |= 5 << 12; break; /* 96k */
			case 3: val |= 6 << 12; break; /* 192k */
			case 10: val |= 4 << 12; break; /* 88.2k */
			case 11: val |= 7 << 12; break; /* 176.4k */
			}
		} else {
			switch (diga->status[0] & IEC958_AES0_PRO_FS) {
			case IEC958_AES0_PRO_FS_44100:
				break;
			case IEC958_AES0_PRO_FS_32000:
				val |= 3U << 12;
				break;
			default:
				val |= 2U << 12;
				break;
			}
		}
	} else {
		/* consumer */
		val |= diga->status[1] & 0x04; /* copyright */
		if ((diga->status[0] & IEC958_AES0_CON_EMPHASIS) ==
		    IEC958_AES0_CON_EMPHASIS_5015)
			val |= 1U << 3;
		val |= (unsigned int)(diga->status[1] & 0x3f) << 4; /* category */
		val |= (unsigned int)(diga->status[3] & IEC958_AES3_CON_FS) << 12; /* fs */
	}
	return val;
}

/* unpack the SPDIF_CTRL word back into IEC958 channel-status bytes */
static void decode_spdif_bits(struct snd_aes_iec958 *diga, unsigned int val)
{
	memset(diga->status, 0, sizeof(diga->status));
	diga->status[0] = val & 0x03; /* professional, non-audio */
	if (val & 0x01) {
		/* professional */
		if (val & (1U << 3))
			diga->status[0] |= IEC958_AES0_PRO_EMPHASIS_5015;
		switch ((val >> 12) & 0x7) {
		case 0:
			break;
		case 2:
			diga->status[0] |= IEC958_AES0_PRO_FS_32000;
			break;
		default:
			diga->status[0] |= IEC958_AES0_PRO_FS_48000;
			break;
		}
	} else {
		/* consumer */
		diga->status[0] |= val & (1U << 2); /* copyright */
		if (val & (1U << 3))
			diga->status[0] |= IEC958_AES0_CON_EMPHASIS_5015;
		diga->status[1] |= (val >> 4) & 0x3f; /* category */
		diga->status[3] |= (val >> 12) & 0x07; /* fs */
	}
}

static int snd_vt1724_spdif_default_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int val;

	val = inw(ICEMT1724(ice, SPDIF_CTRL));
	decode_spdif_bits(&ucontrol->value.iec958, val);
	return 0;
}

static int snd_vt1724_spdif_default_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int val, old;

	val = encode_spdif_bits(&ucontrol->value.iec958);
	spin_lock_irq(&ice->reg_lock);
	old = inw(ICEMT1724(ice, SPDIF_CTRL));
	if (val != old)
		update_spdif_bits(ice, val);
	spin_unlock_irq(&ice->reg_lock);
	return val != old;
}

static struct snd_kcontrol_new snd_vt1724_spdif_default __devinitdata =
{
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
	.info =		snd_vt1724_spdif_info,
	.get =		snd_vt1724_spdif_default_get,
	.put =		snd_vt1724_spdif_default_put
};

static int snd_vt1724_spdif_maskc_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = IEC958_AES0_NONAUDIO |
					   IEC958_AES0_PROFESSIONAL |
					   IEC958_AES0_CON_NOT_COPYRIGHT |
					   IEC958_AES0_CON_EMPHASIS;
	ucontrol->value.iec958.status[1] = IEC958_AES1_CON_ORIGINAL |
					   IEC958_AES1_CON_CATEGORY;
	ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS;
	return 0;
}

static int snd_vt1724_spdif_maskp_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = IEC958_AES0_NONAUDIO |
					   IEC958_AES0_PROFESSIONAL |
					   IEC958_AES0_PRO_FS |
					   IEC958_AES0_PRO_EMPHASIS;
	return 0;
}

static struct snd_kcontrol_new snd_vt1724_spdif_maskc __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
	.info =		snd_vt1724_spdif_info,
	.get =		snd_vt1724_spdif_maskc_get,
};

static struct snd_kcontrol_new snd_vt1724_spdif_maskp __devinitdata =
{
	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
	.name =		SNDRV_CTL_NAME_IEC958("", PLAYBACK, PRO_MASK),
	.info =		snd_vt1724_spdif_info,
	.get =		snd_vt1724_spdif_maskp_get,
};

#define snd_vt1724_spdif_sw_info	snd_ctl_boolean_mono_info

static int snd_vt1724_spdif_sw_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);

	ucontrol->value.integer.value[0] =
		inb(ICEREG1724(ice, SPDIF_CFG)) & VT1724_CFG_SPDIF_OUT_EN ?
1 : 0; return 0; } static int snd_vt1724_spdif_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned char old, val; spin_lock_irq(&ice->reg_lock); old = val = inb(ICEREG1724(ice, SPDIF_CFG)); val &= ~VT1724_CFG_SPDIF_OUT_EN; if (ucontrol->value.integer.value[0]) val |= VT1724_CFG_SPDIF_OUT_EN; if (old != val) outb(val, ICEREG1724(ice, SPDIF_CFG)); spin_unlock_irq(&ice->reg_lock); return old != val; } static struct snd_kcontrol_new snd_vt1724_spdif_switch __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, /* FIXME: the following conflict with IEC958 Playback Route */ /* .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, SWITCH), */ .name = SNDRV_CTL_NAME_IEC958("Output ", NONE, SWITCH), .info = snd_vt1724_spdif_sw_info, .get = snd_vt1724_spdif_sw_get, .put = snd_vt1724_spdif_sw_put }; #if 0 /* NOT USED YET */ /* * GPIO access from extern */ #define snd_vt1724_gpio_info snd_ctl_boolean_mono_info int snd_vt1724_gpio_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int shift = kcontrol->private_value & 0xff; int invert = (kcontrol->private_value & (1<<24)) ? 1 : 0; snd_ice1712_save_gpio_status(ice); ucontrol->value.integer.value[0] = (snd_ice1712_gpio_read(ice) & (1 << shift) ? 1 : 0) ^ invert; snd_ice1712_restore_gpio_status(ice); return 0; } int snd_ice1712_gpio_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int shift = kcontrol->private_value & 0xff; int invert = (kcontrol->private_value & (1<<24)) ? mask : 0; unsigned int val, nval; if (kcontrol->private_value & (1 << 31)) return -EPERM; nval = (ucontrol->value.integer.value[0] ? 
(1 << shift) : 0) ^ invert; snd_ice1712_save_gpio_status(ice); val = snd_ice1712_gpio_read(ice); nval |= val & ~(1 << shift); if (val != nval) snd_ice1712_gpio_write(ice, nval); snd_ice1712_restore_gpio_status(ice); return val != nval; } #endif /* NOT USED YET */ /* * rate */ static int snd_vt1724_pro_internal_clock_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int hw_rates_count = ice->hw_rates->count; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = hw_rates_count + ice->ext_clock_count; /* upper limit - keep at top */ if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; if (uinfo->value.enumerated.item >= hw_rates_count) /* ext_clock items */ strcpy(uinfo->value.enumerated.name, ice->ext_clock_names[ uinfo->value.enumerated.item - hw_rates_count]); else /* int clock items */ sprintf(uinfo->value.enumerated.name, "%d", ice->hw_rates->list[uinfo->value.enumerated.item]); return 0; } static int snd_vt1724_pro_internal_clock_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int i, rate; spin_lock_irq(&ice->reg_lock); if (ice->is_spdif_master(ice)) { ucontrol->value.enumerated.item[0] = ice->hw_rates->count + ice->get_spdif_master_type(ice); } else { rate = ice->get_rate(ice); ucontrol->value.enumerated.item[0] = 0; for (i = 0; i < ice->hw_rates->count; i++) { if (ice->hw_rates->list[i] == rate) { ucontrol->value.enumerated.item[0] = i; break; } } } spin_unlock_irq(&ice->reg_lock); return 0; } static int stdclock_get_spdif_master_type(struct snd_ice1712 *ice) { /* standard external clock - only single type - SPDIF IN */ return 0; } /* setting clock to external - SPDIF */ static int stdclock_set_spdif_clock(struct snd_ice1712 *ice, int type) { unsigned char oval; unsigned char 
i2s_oval; oval = inb(ICEMT1724(ice, RATE)); outb(oval | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE)); /* setting 256fs */ i2s_oval = inb(ICEMT1724(ice, I2S_FORMAT)); outb(i2s_oval & ~VT1724_MT_I2S_MCLK_128X, ICEMT1724(ice, I2S_FORMAT)); return 0; } static int snd_vt1724_pro_internal_clock_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); unsigned int old_rate, new_rate; unsigned int item = ucontrol->value.enumerated.item[0]; unsigned int first_ext_clock = ice->hw_rates->count; if (item > first_ext_clock + ice->ext_clock_count - 1) return -EINVAL; /* if rate = 0 => external clock */ spin_lock_irq(&ice->reg_lock); if (ice->is_spdif_master(ice)) old_rate = 0; else old_rate = ice->get_rate(ice); if (item >= first_ext_clock) { /* switching to external clock */ ice->set_spdif_clock(ice, item - first_ext_clock); new_rate = 0; } else { /* internal on-card clock */ new_rate = ice->hw_rates->list[item]; ice->pro_rate_default = new_rate; spin_unlock_irq(&ice->reg_lock); snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 1); spin_lock_irq(&ice->reg_lock); } spin_unlock_irq(&ice->reg_lock); /* the first switch to the ext. clock mode? 
*/ if (old_rate != new_rate && !new_rate) { /* notify akm chips as well */ unsigned int i; if (ice->gpio.set_pro_rate) ice->gpio.set_pro_rate(ice, 0); for (i = 0; i < ice->akm_codecs; i++) { if (ice->akm[i].ops.set_rate_val) ice->akm[i].ops.set_rate_val(&ice->akm[i], 0); } } return old_rate != new_rate; } static struct snd_kcontrol_new snd_vt1724_pro_internal_clock __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Multi Track Internal Clock", .info = snd_vt1724_pro_internal_clock_info, .get = snd_vt1724_pro_internal_clock_get, .put = snd_vt1724_pro_internal_clock_put }; #define snd_vt1724_pro_rate_locking_info snd_ctl_boolean_mono_info static int snd_vt1724_pro_rate_locking_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = PRO_RATE_LOCKED; return 0; } static int snd_vt1724_pro_rate_locking_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int change = 0, nval; nval = ucontrol->value.integer.value[0] ? 1 : 0; spin_lock_irq(&ice->reg_lock); change = PRO_RATE_LOCKED != nval; PRO_RATE_LOCKED = nval; spin_unlock_irq(&ice->reg_lock); return change; } static struct snd_kcontrol_new snd_vt1724_pro_rate_locking __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Multi Track Rate Locking", .info = snd_vt1724_pro_rate_locking_info, .get = snd_vt1724_pro_rate_locking_get, .put = snd_vt1724_pro_rate_locking_put }; #define snd_vt1724_pro_rate_reset_info snd_ctl_boolean_mono_info static int snd_vt1724_pro_rate_reset_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = PRO_RATE_RESET ? 1 : 0; return 0; } static int snd_vt1724_pro_rate_reset_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int change = 0, nval; nval = ucontrol->value.integer.value[0] ? 
	1 : 0;
	spin_lock_irq(&ice->reg_lock);
	change = PRO_RATE_RESET != nval;
	PRO_RATE_RESET = nval;
	spin_unlock_irq(&ice->reg_lock);
	return change;
}

static struct snd_kcontrol_new snd_vt1724_pro_rate_reset __devinitdata = {
	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
	.name =		"Multi Track Rate Reset",
	.info =		snd_vt1724_pro_rate_reset_info,
	.get =		snd_vt1724_pro_rate_reset_get,
	.put =		snd_vt1724_pro_rate_reset_put
};

/*
 * routing
 */

/* enum info callback: the five possible sources for a playback route */
static int snd_vt1724_pro_route_info(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_info *uinfo)
{
	static char *texts[] = {
		"PCM Out", /* 0 */
		"H/W In 0", "H/W In 1", /* 1-2 */
		"IEC958 In L", "IEC958 In R", /* 3-4 */
	};

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = 5;
	/* clamp the queried item index to the last valid entry */
	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
	strcpy(uinfo->value.enumerated.name,
	       texts[uinfo->value.enumerated.item]);
	return 0;
}

/*
 * Bit offset of the 3-bit route field for analog channel @idx in the
 * ROUTE_PLAYBACK register (even/odd channels of a pair sit 12 bits
 * apart; successive pairs advance by 3 bits from base offset 8).
 */
static inline int analog_route_shift(int idx)
{
	return (idx % 2) * 12 + ((idx / 2) * 3) + 8;
}

/* Bit offset of the 3-bit route field for SPDIF channel @idx */
static inline int digital_route_shift(int idx)
{
	return idx * 3;
}

/*
 * Read the 3-bit hardware route code at @shift from ROUTE_PLAYBACK and
 * translate it to the 0..4 enum index used by the mixer controls.
 */
int snd_ice1724_get_route_val(struct snd_ice1712 *ice, int shift)
{
	unsigned long val;
	unsigned char eitem;
	/* hardware route code -> control enum index; 255 marks codes the
	 * driver never writes (see xroute below) */
	static const unsigned char xlate[8] = {
		0, 255, 1, 2, 255, 255, 3, 4,
	};

	val = inl(ICEMT1724(ice, ROUTE_PLAYBACK));
	val >>= shift;
	val &= 7;	/* we now have 3 bits per output */
	eitem = xlate[val];
	if (eitem == 255) {
		snd_BUG();
		return 0;
	}
	return eitem;
}

/*
 * Write enum index @val (0..4) as a hardware route code into the 3-bit
 * field at @shift of ROUTE_PLAYBACK.  Returns 1 if the register value
 * changed, 0 otherwise.
 */
int snd_ice1724_put_route_val(struct snd_ice1712 *ice, unsigned int val,
			      int shift)
{
	unsigned int old_val, nval;
	int change;
	/* control enum index -> hardware route code (inverse of xlate) */
	static const unsigned char xroute[8] = {
		0, /* PCM */
		2, /* PSDIN0 Left */
		3, /* PSDIN0 Right */
		6, /* SPDIN Left */
		7, /* SPDIN Right */
	};

	nval = xroute[val % 5];
	val = old_val = inl(ICEMT1724(ice, ROUTE_PLAYBACK));
	val &= ~(0x07 << shift);
	val |= nval << shift;
	change = val != old_val;
	if (change)
		outl(val, ICEMT1724(ice, ROUTE_PLAYBACK));
	return change;
}

static int
snd_vt1724_pro_route_analog_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = snd_ice1724_get_route_val(ice, analog_route_shift(idx)); return 0; } static int snd_vt1724_pro_route_analog_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); return snd_ice1724_put_route_val(ice, ucontrol->value.enumerated.item[0], analog_route_shift(idx)); } static int snd_vt1724_pro_route_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); ucontrol->value.enumerated.item[0] = snd_ice1724_get_route_val(ice, digital_route_shift(idx)); return 0; } static int snd_vt1724_pro_route_spdif_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); return snd_ice1724_put_route_val(ice, ucontrol->value.enumerated.item[0], digital_route_shift(idx)); } static struct snd_kcontrol_new snd_vt1724_mixer_pro_analog_route __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "H/W Playback Route", .info = snd_vt1724_pro_route_info, .get = snd_vt1724_pro_route_analog_get, .put = snd_vt1724_pro_route_analog_put, }; static struct snd_kcontrol_new snd_vt1724_mixer_pro_spdif_route __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, NONE) "Route", .info = snd_vt1724_pro_route_info, .get = snd_vt1724_pro_route_spdif_get, .put = snd_vt1724_pro_route_spdif_put, .count = 2, }; static int snd_vt1724_pro_peak_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = 
SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 22; /* FIXME: for compatibility with ice1712... */ uinfo->value.integer.min = 0; uinfo->value.integer.max = 255; return 0; } static int snd_vt1724_pro_peak_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol); int idx; spin_lock_irq(&ice->reg_lock); for (idx = 0; idx < 22; idx++) { outb(idx, ICEMT1724(ice, MONITOR_PEAKINDEX)); ucontrol->value.integer.value[idx] = inb(ICEMT1724(ice, MONITOR_PEAKDATA)); } spin_unlock_irq(&ice->reg_lock); return 0; } static struct snd_kcontrol_new snd_vt1724_mixer_pro_peak __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = "Multi Track Peak", .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, .info = snd_vt1724_pro_peak_info, .get = snd_vt1724_pro_peak_get }; /* * */ static struct snd_ice1712_card_info no_matched __devinitdata; static struct snd_ice1712_card_info *card_tables[] __devinitdata = { snd_vt1724_revo_cards, snd_vt1724_amp_cards, snd_vt1724_aureon_cards, snd_vt1720_mobo_cards, snd_vt1720_pontis_cards, snd_vt1724_prodigy_hifi_cards, snd_vt1724_prodigy192_cards, snd_vt1724_juli_cards, snd_vt1724_maya44_cards, snd_vt1724_phase_cards, snd_vt1724_wtm_cards, snd_vt1724_se_cards, snd_vt1724_qtet_cards, NULL, }; /* */ static void wait_i2c_busy(struct snd_ice1712 *ice) { int t = 0x10000; while ((inb(ICEREG1724(ice, I2C_CTRL)) & VT1724_I2C_BUSY) && t--) ; if (t == -1) printk(KERN_ERR "ice1724: i2c busy timeout\n"); } unsigned char snd_vt1724_read_i2c(struct snd_ice1712 *ice, unsigned char dev, unsigned char addr) { unsigned char val; mutex_lock(&ice->i2c_mutex); wait_i2c_busy(ice); outb(addr, ICEREG1724(ice, I2C_BYTE_ADDR)); outb(dev & ~VT1724_I2C_WRITE, ICEREG1724(ice, I2C_DEV_ADDR)); wait_i2c_busy(ice); val = inb(ICEREG1724(ice, I2C_DATA)); mutex_unlock(&ice->i2c_mutex); /* printk(KERN_DEBUG "i2c_read: [0x%x,0x%x] = 0x%x\n", dev, addr, val); */ return val; } void 
snd_vt1724_write_i2c(struct snd_ice1712 *ice, unsigned char dev, unsigned char addr, unsigned char data) { mutex_lock(&ice->i2c_mutex); wait_i2c_busy(ice); /* printk(KERN_DEBUG "i2c_write: [0x%x,0x%x] = 0x%x\n", dev, addr, data); */ outb(addr, ICEREG1724(ice, I2C_BYTE_ADDR)); outb(data, ICEREG1724(ice, I2C_DATA)); outb(dev | VT1724_I2C_WRITE, ICEREG1724(ice, I2C_DEV_ADDR)); wait_i2c_busy(ice); mutex_unlock(&ice->i2c_mutex); } static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice, const char *modelname) { const int dev = 0xa0; /* EEPROM device address */ unsigned int i, size; struct snd_ice1712_card_info * const *tbl, *c; if (!modelname || !*modelname) { ice->eeprom.subvendor = 0; if ((inb(ICEREG1724(ice, I2C_CTRL)) & VT1724_I2C_EEPROM) != 0) ice->eeprom.subvendor = (snd_vt1724_read_i2c(ice, dev, 0x00) << 0) | (snd_vt1724_read_i2c(ice, dev, 0x01) << 8) | (snd_vt1724_read_i2c(ice, dev, 0x02) << 16) | (snd_vt1724_read_i2c(ice, dev, 0x03) << 24); if (ice->eeprom.subvendor == 0 || ice->eeprom.subvendor == (unsigned int)-1) { /* invalid subvendor from EEPROM, try the PCI * subststem ID instead */ u16 vendor, device; pci_read_config_word(ice->pci, PCI_SUBSYSTEM_VENDOR_ID, &vendor); pci_read_config_word(ice->pci, PCI_SUBSYSTEM_ID, &device); ice->eeprom.subvendor = ((unsigned int)swab16(vendor) << 16) | swab16(device); if (ice->eeprom.subvendor == 0 || ice->eeprom.subvendor == (unsigned int)-1) { printk(KERN_ERR "ice1724: No valid ID is found\n"); return -ENXIO; } } } for (tbl = card_tables; *tbl; tbl++) { for (c = *tbl; c->subvendor; c++) { if (modelname && c->model && !strcmp(modelname, c->model)) { printk(KERN_INFO "ice1724: Using board model %s\n", c->name); ice->eeprom.subvendor = c->subvendor; } else if (c->subvendor != ice->eeprom.subvendor) continue; if (!c->eeprom_size || !c->eeprom_data) goto found; /* if the EEPROM is given by the driver, use it */ snd_printdd("using the defined eeprom..\n"); ice->eeprom.version = 2; ice->eeprom.size = 
c->eeprom_size + 6; memcpy(ice->eeprom.data, c->eeprom_data, c->eeprom_size); goto read_skipped; } } printk(KERN_WARNING "ice1724: No matching model found for ID 0x%x\n", ice->eeprom.subvendor); found: ice->eeprom.size = snd_vt1724_read_i2c(ice, dev, 0x04); if (ice->eeprom.size < 6) ice->eeprom.size = 32; else if (ice->eeprom.size > 32) { printk(KERN_ERR "ice1724: Invalid EEPROM (size = %i)\n", ice->eeprom.size); return -EIO; } ice->eeprom.version = snd_vt1724_read_i2c(ice, dev, 0x05); if (ice->eeprom.version != 2) printk(KERN_WARNING "ice1724: Invalid EEPROM version %i\n", ice->eeprom.version); size = ice->eeprom.size - 6; for (i = 0; i < size; i++) ice->eeprom.data[i] = snd_vt1724_read_i2c(ice, dev, i + 6); read_skipped: ice->eeprom.gpiomask = eeprom_triple(ice, ICE_EEP2_GPIO_MASK); ice->eeprom.gpiostate = eeprom_triple(ice, ICE_EEP2_GPIO_STATE); ice->eeprom.gpiodir = eeprom_triple(ice, ICE_EEP2_GPIO_DIR); return 0; } static void snd_vt1724_chip_reset(struct snd_ice1712 *ice) { outb(VT1724_RESET , ICEREG1724(ice, CONTROL)); inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */ msleep(10); outb(0, ICEREG1724(ice, CONTROL)); inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */ msleep(10); } static int snd_vt1724_chip_init(struct snd_ice1712 *ice) { outb(ice->eeprom.data[ICE_EEP2_SYSCONF], ICEREG1724(ice, SYS_CFG)); outb(ice->eeprom.data[ICE_EEP2_ACLINK], ICEREG1724(ice, AC97_CFG)); outb(ice->eeprom.data[ICE_EEP2_I2S], ICEREG1724(ice, I2S_FEATURES)); outb(ice->eeprom.data[ICE_EEP2_SPDIF], ICEREG1724(ice, SPDIF_CFG)); ice->gpio.write_mask = ice->eeprom.gpiomask; ice->gpio.direction = ice->eeprom.gpiodir; snd_vt1724_set_gpio_mask(ice, ice->eeprom.gpiomask); snd_vt1724_set_gpio_dir(ice, ice->eeprom.gpiodir); snd_vt1724_set_gpio_data(ice, ice->eeprom.gpiostate); outb(0, ICEREG1724(ice, POWERDOWN)); /* MPU_RX and TX irq masks are cleared later dynamically */ outb(VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX , ICEREG1724(ice, IRQMASK)); /* don't handle FIFO overrun/underruns 
(just yet), * since they cause machine lockups */ outb(VT1724_MULTI_FIFO_ERR, ICEMT1724(ice, DMA_INT_MASK)); return 0; } static int __devinit snd_vt1724_spdif_build_controls(struct snd_ice1712 *ice) { int err; struct snd_kcontrol *kctl; if (snd_BUG_ON(!ice->pcm)) return -EIO; if (!ice->own_routing) { err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_mixer_pro_spdif_route, ice)); if (err < 0) return err; } err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_spdif_switch, ice)); if (err < 0) return err; err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_default, ice)); if (err < 0) return err; kctl->id.device = ice->pcm->device; err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskc, ice)); if (err < 0) return err; kctl->id.device = ice->pcm->device; err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskp, ice)); if (err < 0) return err; kctl->id.device = ice->pcm->device; #if 0 /* use default only */ err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_stream, ice)); if (err < 0) return err; kctl->id.device = ice->pcm->device; ice->spdif.stream_ctl = kctl; #endif return 0; } static int __devinit snd_vt1724_build_controls(struct snd_ice1712 *ice) { int err; err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_eeprom, ice)); if (err < 0) return err; err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_pro_internal_clock, ice)); if (err < 0) return err; err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_pro_rate_locking, ice)); if (err < 0) return err; err = snd_ctl_add(ice->card, snd_ctl_new1(&snd_vt1724_pro_rate_reset, ice)); if (err < 0) return err; if (!ice->own_routing && ice->num_total_dacs > 0) { struct snd_kcontrol_new tmp = snd_vt1724_mixer_pro_analog_route; tmp.count = ice->num_total_dacs; if (ice->vt1720 && tmp.count > 2) tmp.count = 2; err = snd_ctl_add(ice->card, snd_ctl_new1(&tmp, ice)); if (err < 0) return err; } err = snd_ctl_add(ice->card, 
snd_ctl_new1(&snd_vt1724_mixer_pro_peak, ice)); if (err < 0) return err; return 0; } static int snd_vt1724_free(struct snd_ice1712 *ice) { if (!ice->port) goto __hw_end; /* mask all interrupts */ outb(0xff, ICEMT1724(ice, DMA_INT_MASK)); outb(0xff, ICEREG1724(ice, IRQMASK)); /* --- */ __hw_end: if (ice->irq >= 0) free_irq(ice->irq, ice); pci_release_regions(ice->pci); snd_ice1712_akm4xxx_free(ice); pci_disable_device(ice->pci); kfree(ice->spec); kfree(ice); return 0; } static int snd_vt1724_dev_free(struct snd_device *device) { struct snd_ice1712 *ice = device->device_data; return snd_vt1724_free(ice); } static int __devinit snd_vt1724_create(struct snd_card *card, struct pci_dev *pci, const char *modelname, struct snd_ice1712 **r_ice1712) { struct snd_ice1712 *ice; int err; static struct snd_device_ops ops = { .dev_free = snd_vt1724_dev_free, }; *r_ice1712 = NULL; /* enable PCI device */ err = pci_enable_device(pci); if (err < 0) return err; ice = kzalloc(sizeof(*ice), GFP_KERNEL); if (ice == NULL) { pci_disable_device(pci); return -ENOMEM; } ice->vt1724 = 1; spin_lock_init(&ice->reg_lock); mutex_init(&ice->gpio_mutex); mutex_init(&ice->open_mutex); mutex_init(&ice->i2c_mutex); ice->gpio.set_mask = snd_vt1724_set_gpio_mask; ice->gpio.get_mask = snd_vt1724_get_gpio_mask; ice->gpio.set_dir = snd_vt1724_set_gpio_dir; ice->gpio.get_dir = snd_vt1724_get_gpio_dir; ice->gpio.set_data = snd_vt1724_set_gpio_data; ice->gpio.get_data = snd_vt1724_get_gpio_data; ice->card = card; ice->pci = pci; ice->irq = -1; pci_set_master(pci); snd_vt1724_proc_init(ice); synchronize_irq(pci->irq); card->private_data = ice; err = pci_request_regions(pci, "ICE1724"); if (err < 0) { kfree(ice); pci_disable_device(pci); return err; } ice->port = pci_resource_start(pci, 0); ice->profi_port = pci_resource_start(pci, 1); if (request_irq(pci->irq, snd_vt1724_interrupt, IRQF_SHARED, "ICE1724", ice)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_vt1724_free(ice); return -EIO; } 
ice->irq = pci->irq; snd_vt1724_chip_reset(ice); if (snd_vt1724_read_eeprom(ice, modelname) < 0) { snd_vt1724_free(ice); return -EIO; } if (snd_vt1724_chip_init(ice) < 0) { snd_vt1724_free(ice); return -EIO; } err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ice, &ops); if (err < 0) { snd_vt1724_free(ice); return err; } snd_card_set_dev(card, &pci->dev); *r_ice1712 = ice; return 0; } /* * * Registration * */ static int __devinit snd_vt1724_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_ice1712 *ice; int pcm_dev = 0, err; struct snd_ice1712_card_info * const *tbl, *c; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; strcpy(card->driver, "ICE1724"); strcpy(card->shortname, "ICEnsemble ICE1724"); err = snd_vt1724_create(card, pci, model[dev], &ice); if (err < 0) { snd_card_free(card); return err; } /* field init before calling chip_init */ ice->ext_clock_count = 0; for (tbl = card_tables; *tbl; tbl++) { for (c = *tbl; c->subvendor; c++) { if (c->subvendor == ice->eeprom.subvendor) { strcpy(card->shortname, c->name); if (c->driver) /* specific driver? */ strcpy(card->driver, c->driver); if (c->chip_init) { err = c->chip_init(ice); if (err < 0) { snd_card_free(card); return err; } } goto __found; } } } c = &no_matched; __found: /* * VT1724 has separate DMAs for the analog and the SPDIF streams while * ICE1712 has only one for both (mixed up). * * Confusingly the analog PCM is named "professional" here because it * was called so in ice1712 driver, and vt1724 driver is derived from * ice1712 driver. 
*/ ice->pro_rate_default = PRO_RATE_DEFAULT; if (!ice->is_spdif_master) ice->is_spdif_master = stdclock_is_spdif_master; if (!ice->get_rate) ice->get_rate = stdclock_get_rate; if (!ice->set_rate) ice->set_rate = stdclock_set_rate; if (!ice->set_mclk) ice->set_mclk = stdclock_set_mclk; if (!ice->set_spdif_clock) ice->set_spdif_clock = stdclock_set_spdif_clock; if (!ice->get_spdif_master_type) ice->get_spdif_master_type = stdclock_get_spdif_master_type; if (!ice->ext_clock_names) ice->ext_clock_names = ext_clock_names; if (!ice->ext_clock_count) ice->ext_clock_count = ARRAY_SIZE(ext_clock_names); if (!ice->hw_rates) set_std_hw_rates(ice); err = snd_vt1724_pcm_profi(ice, pcm_dev++); if (err < 0) { snd_card_free(card); return err; } err = snd_vt1724_pcm_spdif(ice, pcm_dev++); if (err < 0) { snd_card_free(card); return err; } err = snd_vt1724_pcm_indep(ice, pcm_dev++); if (err < 0) { snd_card_free(card); return err; } err = snd_vt1724_ac97_mixer(ice); if (err < 0) { snd_card_free(card); return err; } err = snd_vt1724_build_controls(ice); if (err < 0) { snd_card_free(card); return err; } if (ice->pcm && ice->has_spdif) { /* has SPDIF I/O */ err = snd_vt1724_spdif_build_controls(ice); if (err < 0) { snd_card_free(card); return err; } } if (c->build_controls) { err = c->build_controls(ice); if (err < 0) { snd_card_free(card); return err; } } if (!c->no_mpu401) { if (ice->eeprom.data[ICE_EEP2_SYSCONF] & VT1724_CFG_MPU401) { struct snd_rawmidi *rmidi; err = snd_rawmidi_new(card, "MIDI", 0, 1, 1, &rmidi); if (err < 0) { snd_card_free(card); return err; } ice->rmidi[0] = rmidi; rmidi->private_data = ice; strcpy(rmidi->name, "ICE1724 MIDI"); rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &vt1724_midi_output_ops); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &vt1724_midi_input_ops); /* set watermarks */ outb(VT1724_MPU_RX_FIFO | 0x1, ICEREG1724(ice, 
MPU_FIFO_WM)); outb(0x1, ICEREG1724(ice, MPU_FIFO_WM)); /* set UART mode */ outb(VT1724_MPU_UART, ICEREG1724(ice, MPU_CTRL)); } } sprintf(card->longname, "%s at 0x%lx, irq %i", card->shortname, ice->port, ice->irq); err = snd_card_register(card); if (err < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_vt1724_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM static int snd_vt1724_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_ice1712 *ice = card->private_data; if (!ice->pm_suspend_enabled) return 0; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(ice->pcm); snd_pcm_suspend_all(ice->pcm_pro); snd_pcm_suspend_all(ice->pcm_ds); snd_ac97_suspend(ice->ac97); spin_lock_irq(&ice->reg_lock); ice->pm_saved_is_spdif_master = ice->is_spdif_master(ice); ice->pm_saved_spdif_ctrl = inw(ICEMT1724(ice, SPDIF_CTRL)); ice->pm_saved_spdif_cfg = inb(ICEREG1724(ice, SPDIF_CFG)); ice->pm_saved_route = inl(ICEMT1724(ice, ROUTE_PLAYBACK)); spin_unlock_irq(&ice->reg_lock); if (ice->pm_suspend) ice->pm_suspend(ice); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_vt1724_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_ice1712 *ice = card->private_data; if (!ice->pm_suspend_enabled) return 0; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_vt1724_chip_reset(ice); if (snd_vt1724_chip_init(ice) < 0) { snd_card_disconnect(card); return -EIO; } if (ice->pm_resume) ice->pm_resume(ice); if (ice->pm_saved_is_spdif_master) { /* switching to external clock via SPDIF */ ice->set_spdif_clock(ice, 0); } else { /* internal on-card clock */ 
snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 1); } update_spdif_bits(ice, ice->pm_saved_spdif_ctrl); outb(ice->pm_saved_spdif_cfg, ICEREG1724(ice, SPDIF_CFG)); outl(ice->pm_saved_route, ICEMT1724(ice, ROUTE_PLAYBACK)); if (ice->ac97) snd_ac97_resume(ice->ac97); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct pci_driver driver = { .name = "ICE1724", .id_table = snd_vt1724_ids, .probe = snd_vt1724_probe, .remove = __devexit_p(snd_vt1724_remove), #ifdef CONFIG_PM .suspend = snd_vt1724_suspend, .resume = snd_vt1724_resume, #endif }; static int __init alsa_card_ice1724_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_ice1724_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_ice1724_init) module_exit(alsa_card_ice1724_exit)
gpl-2.0
Drgravy/g3stock
drivers/media/video/videobuf2-memops.c
4358
6171
/*
 * videobuf2-memops.c - generic memory handling routines for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/file.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>

/**
 * vb2_get_vma() - acquire and lock the virtual memory area
 * @vma:	given virtual memory area
 *
 * This function attempts to acquire an area mapped in the userspace for
 * the duration of a hardware operation. The area is "locked" by performing
 * the same set of operation that are done when process calls fork() and
 * memory areas are duplicated.
 *
 * Returns a copy of a virtual memory region on success or NULL.
 */
struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (vma_copy == NULL)
		return NULL;

	/* take the same references fork() would: notify the mapping's
	 * owner and pin the backing file, so the area stays alive while
	 * the hardware uses it */
	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	/* the copy is detached: it belongs to no mm and no vma list */
	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}
EXPORT_SYMBOL_GPL(vb2_get_vma);

/**
 * vb2_put_vma() - release a userspace virtual memory area
 * @vma:	virtual memory region associated with the area to be released
 *
 * This function releases the previously acquired memory area after a hardware
 * operation.
*/ void vb2_put_vma(struct vm_area_struct *vma) { if (!vma) return; if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) fput(vma->vm_file); kfree(vma); } EXPORT_SYMBOL_GPL(vb2_put_vma); /** * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory * @vaddr: starting virtual address of the area to be verified * @size: size of the area * @res_paddr: will return physical address for the given vaddr * @res_vma: will return locked copy of struct vm_area for the given area * * This function will go through memory area of size @size mapped at @vaddr and * verify that the underlying physical pages are contiguous. If they are * contiguous the virtual memory area is locked and a @res_vma is filled with * the copy and @res_pa set to the physical address of the buffer. * * Returns 0 on success. */ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size, struct vm_area_struct **res_vma, dma_addr_t *res_pa) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long offset, start, end; unsigned long this_pfn, prev_pfn; dma_addr_t pa = 0; start = vaddr; offset = start & ~PAGE_MASK; end = start + size; vma = find_vma(mm, start); if (vma == NULL || vma->vm_end < end) return -EFAULT; for (prev_pfn = 0; start < end; start += PAGE_SIZE) { int ret = follow_pfn(vma, start, &this_pfn); if (ret) return ret; if (prev_pfn == 0) pa = this_pfn << PAGE_SHIFT; else if (this_pfn != prev_pfn + 1) return -EFAULT; prev_pfn = this_pfn; } /* * Memory is contigous, lock vma and return to the caller */ *res_vma = vb2_get_vma(vma); if (*res_vma == NULL) return -ENOMEM; *res_pa = pa + offset; return 0; } EXPORT_SYMBOL_GPL(vb2_get_contig_userptr); /** * vb2_mmap_pfn_range() - map physical pages to userspace * @vma: virtual memory region for the mapping * @paddr: starting physical address of the memory to be mapped * @size: size of the memory to be mapped * @vm_ops: vm operations to be assigned to the created area * @priv: 
private data to be associated with the area * * Returns 0 on success. */ int vb2_mmap_pfn_range(struct vm_area_struct *vma, unsigned long paddr, unsigned long size, const struct vm_operations_struct *vm_ops, void *priv) { int ret; size = min_t(unsigned long, vma->vm_end - vma->vm_start, size); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ret = remap_pfn_range(vma, vma->vm_start, paddr >> PAGE_SHIFT, size, vma->vm_page_prot); if (ret) { printk(KERN_ERR "Remapping memory failed, error: %d\n", ret); return ret; } vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED; vma->vm_private_data = priv; vma->vm_ops = vm_ops; vma->vm_ops->open(vma); pr_debug("%s: mapped paddr 0x%08lx at 0x%08lx, size %ld\n", __func__, paddr, vma->vm_start, size); return 0; } EXPORT_SYMBOL_GPL(vb2_mmap_pfn_range); /** * vb2_common_vm_open() - increase refcount of the vma * @vma: virtual memory region for the mapping * * This function adds another user to the provided vma. It expects * struct vb2_vmarea_handler pointer in vma->vm_private_data. */ static void vb2_common_vm_open(struct vm_area_struct *vma) { struct vb2_vmarea_handler *h = vma->vm_private_data; pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n", __func__, h, atomic_read(h->refcount), vma->vm_start, vma->vm_end); atomic_inc(h->refcount); } /** * vb2_common_vm_close() - decrease refcount of the vma * @vma: virtual memory region for the mapping * * This function releases the user from the provided vma. It expects * struct vb2_vmarea_handler pointer in vma->vm_private_data. 
*/ static void vb2_common_vm_close(struct vm_area_struct *vma) { struct vb2_vmarea_handler *h = vma->vm_private_data; pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n", __func__, h, atomic_read(h->refcount), vma->vm_start, vma->vm_end); h->put(h->arg); } /** * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped * video buffers */ const struct vm_operations_struct vb2_common_vm_ops = { .open = vb2_common_vm_open, .close = vb2_common_vm_close, }; EXPORT_SYMBOL_GPL(vb2_common_vm_ops); MODULE_DESCRIPTION("common memory handling routines for videobuf2"); MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>"); MODULE_LICENSE("GPL");
gpl-2.0
DevChun/htc-runnymede-ics-kernel
drivers/media/common/tuners/mxl5007t.c
4614
20785
/* * mxl5007t.c - driver for the MaxLinear MxL5007T silicon tuner * * Copyright (C) 2008, 2009 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/i2c.h> #include <linux/types.h> #include <linux/videodev2.h> #include "tuner-i2c.h" #include "mxl5007t.h" static DEFINE_MUTEX(mxl5007t_list_mutex); static LIST_HEAD(hybrid_tuner_instance_list); static int mxl5007t_debug; module_param_named(debug, mxl5007t_debug, int, 0644); MODULE_PARM_DESC(debug, "set debug level"); /* ------------------------------------------------------------------------- */ #define mxl_printk(kern, fmt, arg...) \ printk(kern "%s: " fmt "\n", __func__, ##arg) #define mxl_err(fmt, arg...) \ mxl_printk(KERN_ERR, "%d: " fmt, __LINE__, ##arg) #define mxl_warn(fmt, arg...) \ mxl_printk(KERN_WARNING, fmt, ##arg) #define mxl_info(fmt, arg...) \ mxl_printk(KERN_INFO, fmt, ##arg) #define mxl_debug(fmt, arg...) 
\ ({ \ if (mxl5007t_debug) \ mxl_printk(KERN_DEBUG, fmt, ##arg); \ }) #define mxl_fail(ret) \ ({ \ int __ret; \ __ret = (ret < 0); \ if (__ret) \ mxl_printk(KERN_ERR, "error %d on line %d", \ ret, __LINE__); \ __ret; \ }) /* ------------------------------------------------------------------------- */ #define MHz 1000000 enum mxl5007t_mode { MxL_MODE_ISDBT = 0, MxL_MODE_DVBT = 1, MxL_MODE_ATSC = 2, MxL_MODE_CABLE = 0x10, }; enum mxl5007t_chip_version { MxL_UNKNOWN_ID = 0x00, MxL_5007_V1_F1 = 0x11, MxL_5007_V1_F2 = 0x12, MxL_5007_V4 = 0x14, MxL_5007_V2_100_F1 = 0x21, MxL_5007_V2_100_F2 = 0x22, MxL_5007_V2_200_F1 = 0x23, MxL_5007_V2_200_F2 = 0x24, }; struct reg_pair_t { u8 reg; u8 val; }; /* ------------------------------------------------------------------------- */ static struct reg_pair_t init_tab[] = { { 0x02, 0x06 }, { 0x03, 0x48 }, { 0x05, 0x04 }, { 0x06, 0x10 }, { 0x2e, 0x15 }, /* OVERRIDE */ { 0x30, 0x10 }, /* OVERRIDE */ { 0x45, 0x58 }, /* OVERRIDE */ { 0x48, 0x19 }, /* OVERRIDE */ { 0x52, 0x03 }, /* OVERRIDE */ { 0x53, 0x44 }, /* OVERRIDE */ { 0x6a, 0x4b }, /* OVERRIDE */ { 0x76, 0x00 }, /* OVERRIDE */ { 0x78, 0x18 }, /* OVERRIDE */ { 0x7a, 0x17 }, /* OVERRIDE */ { 0x85, 0x06 }, /* OVERRIDE */ { 0x01, 0x01 }, /* TOP_MASTER_ENABLE */ { 0, 0 } }; static struct reg_pair_t init_tab_cable[] = { { 0x02, 0x06 }, { 0x03, 0x48 }, { 0x05, 0x04 }, { 0x06, 0x10 }, { 0x09, 0x3f }, { 0x0a, 0x3f }, { 0x0b, 0x3f }, { 0x2e, 0x15 }, /* OVERRIDE */ { 0x30, 0x10 }, /* OVERRIDE */ { 0x45, 0x58 }, /* OVERRIDE */ { 0x48, 0x19 }, /* OVERRIDE */ { 0x52, 0x03 }, /* OVERRIDE */ { 0x53, 0x44 }, /* OVERRIDE */ { 0x6a, 0x4b }, /* OVERRIDE */ { 0x76, 0x00 }, /* OVERRIDE */ { 0x78, 0x18 }, /* OVERRIDE */ { 0x7a, 0x17 }, /* OVERRIDE */ { 0x85, 0x06 }, /* OVERRIDE */ { 0x01, 0x01 }, /* TOP_MASTER_ENABLE */ { 0, 0 } }; /* ------------------------------------------------------------------------- */ static struct reg_pair_t reg_pair_rftune[] = { { 0x0f, 0x00 }, /* abort tune */ { 0x0c, 0x15 }, 
{ 0x0d, 0x40 }, { 0x0e, 0x0e }, { 0x1f, 0x87 }, /* OVERRIDE */ { 0x20, 0x1f }, /* OVERRIDE */ { 0x21, 0x87 }, /* OVERRIDE */ { 0x22, 0x1f }, /* OVERRIDE */ { 0x80, 0x01 }, /* freq dependent */ { 0x0f, 0x01 }, /* start tune */ { 0, 0 } }; /* ------------------------------------------------------------------------- */ struct mxl5007t_state { struct list_head hybrid_tuner_instance_list; struct tuner_i2c_props i2c_props; struct mutex lock; struct mxl5007t_config *config; enum mxl5007t_chip_version chip_id; struct reg_pair_t tab_init[ARRAY_SIZE(init_tab)]; struct reg_pair_t tab_init_cable[ARRAY_SIZE(init_tab_cable)]; struct reg_pair_t tab_rftune[ARRAY_SIZE(reg_pair_rftune)]; u32 frequency; u32 bandwidth; }; /* ------------------------------------------------------------------------- */ /* called by _init and _rftun to manipulate the register arrays */ static void set_reg_bits(struct reg_pair_t *reg_pair, u8 reg, u8 mask, u8 val) { unsigned int i = 0; while (reg_pair[i].reg || reg_pair[i].val) { if (reg_pair[i].reg == reg) { reg_pair[i].val &= ~mask; reg_pair[i].val |= val; } i++; } return; } static void copy_reg_bits(struct reg_pair_t *reg_pair1, struct reg_pair_t *reg_pair2) { unsigned int i, j; i = j = 0; while (reg_pair1[i].reg || reg_pair1[i].val) { while (reg_pair2[j].reg || reg_pair2[j].val) { if (reg_pair1[i].reg != reg_pair2[j].reg) { j++; continue; } reg_pair2[j].val = reg_pair1[i].val; break; } i++; } return; } /* ------------------------------------------------------------------------- */ static void mxl5007t_set_mode_bits(struct mxl5007t_state *state, enum mxl5007t_mode mode, s32 if_diff_out_level) { switch (mode) { case MxL_MODE_ATSC: set_reg_bits(state->tab_init, 0x06, 0x1f, 0x12); break; case MxL_MODE_DVBT: set_reg_bits(state->tab_init, 0x06, 0x1f, 0x11); break; case MxL_MODE_ISDBT: set_reg_bits(state->tab_init, 0x06, 0x1f, 0x10); break; case MxL_MODE_CABLE: set_reg_bits(state->tab_init_cable, 0x09, 0xff, 0xc1); set_reg_bits(state->tab_init_cable, 0x0a, 
0xff, 8 - if_diff_out_level); set_reg_bits(state->tab_init_cable, 0x0b, 0xff, 0x17); break; default: mxl_fail(-EINVAL); } return; } static void mxl5007t_set_if_freq_bits(struct mxl5007t_state *state, enum mxl5007t_if_freq if_freq, int invert_if) { u8 val; switch (if_freq) { case MxL_IF_4_MHZ: val = 0x00; break; case MxL_IF_4_5_MHZ: val = 0x02; break; case MxL_IF_4_57_MHZ: val = 0x03; break; case MxL_IF_5_MHZ: val = 0x04; break; case MxL_IF_5_38_MHZ: val = 0x05; break; case MxL_IF_6_MHZ: val = 0x06; break; case MxL_IF_6_28_MHZ: val = 0x07; break; case MxL_IF_9_1915_MHZ: val = 0x08; break; case MxL_IF_35_25_MHZ: val = 0x09; break; case MxL_IF_36_15_MHZ: val = 0x0a; break; case MxL_IF_44_MHZ: val = 0x0b; break; default: mxl_fail(-EINVAL); return; } set_reg_bits(state->tab_init, 0x02, 0x0f, val); /* set inverted IF or normal IF */ set_reg_bits(state->tab_init, 0x02, 0x10, invert_if ? 0x10 : 0x00); return; } static void mxl5007t_set_xtal_freq_bits(struct mxl5007t_state *state, enum mxl5007t_xtal_freq xtal_freq) { switch (xtal_freq) { case MxL_XTAL_16_MHZ: /* select xtal freq & ref freq */ set_reg_bits(state->tab_init, 0x03, 0xf0, 0x00); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x00); break; case MxL_XTAL_20_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0x10); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x01); break; case MxL_XTAL_20_25_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0x20); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x02); break; case MxL_XTAL_20_48_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0x30); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x03); break; case MxL_XTAL_24_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0x40); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x04); break; case MxL_XTAL_25_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0x50); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x05); break; case MxL_XTAL_25_14_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0x60); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x06); break; case MxL_XTAL_27_MHZ: 
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x70); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x07); break; case MxL_XTAL_28_8_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0x80); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x08); break; case MxL_XTAL_32_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0x90); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x09); break; case MxL_XTAL_40_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0xa0); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x0a); break; case MxL_XTAL_44_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0xb0); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x0b); break; case MxL_XTAL_48_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0xc0); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x0c); break; case MxL_XTAL_49_3811_MHZ: set_reg_bits(state->tab_init, 0x03, 0xf0, 0xd0); set_reg_bits(state->tab_init, 0x05, 0x0f, 0x0d); break; default: mxl_fail(-EINVAL); return; } return; } static struct reg_pair_t *mxl5007t_calc_init_regs(struct mxl5007t_state *state, enum mxl5007t_mode mode) { struct mxl5007t_config *cfg = state->config; memcpy(&state->tab_init, &init_tab, sizeof(init_tab)); memcpy(&state->tab_init_cable, &init_tab_cable, sizeof(init_tab_cable)); mxl5007t_set_mode_bits(state, mode, cfg->if_diff_out_level); mxl5007t_set_if_freq_bits(state, cfg->if_freq_hz, cfg->invert_if); mxl5007t_set_xtal_freq_bits(state, cfg->xtal_freq_hz); set_reg_bits(state->tab_init, 0x04, 0x01, cfg->loop_thru_enable); set_reg_bits(state->tab_init, 0x03, 0x08, cfg->clk_out_enable << 3); set_reg_bits(state->tab_init, 0x03, 0x07, cfg->clk_out_amp); if (mode >= MxL_MODE_CABLE) { copy_reg_bits(state->tab_init, state->tab_init_cable); return state->tab_init_cable; } else return state->tab_init; } /* ------------------------------------------------------------------------- */ enum mxl5007t_bw_mhz { MxL_BW_6MHz = 6, MxL_BW_7MHz = 7, MxL_BW_8MHz = 8, }; static void mxl5007t_set_bw_bits(struct mxl5007t_state *state, enum mxl5007t_bw_mhz bw) { u8 val; switch (bw) { 
case MxL_BW_6MHz: val = 0x15; /* set DIG_MODEINDEX, DIG_MODEINDEX_A, * and DIG_MODEINDEX_CSF */ break; case MxL_BW_7MHz: val = 0x2a; break; case MxL_BW_8MHz: val = 0x3f; break; default: mxl_fail(-EINVAL); return; } set_reg_bits(state->tab_rftune, 0x0c, 0x3f, val); return; } static struct reg_pair_t *mxl5007t_calc_rf_tune_regs(struct mxl5007t_state *state, u32 rf_freq, enum mxl5007t_bw_mhz bw) { u32 dig_rf_freq = 0; u32 temp; u32 frac_divider = 1000000; unsigned int i; memcpy(&state->tab_rftune, &reg_pair_rftune, sizeof(reg_pair_rftune)); mxl5007t_set_bw_bits(state, bw); /* Convert RF frequency into 16 bits => * 10 bit integer (MHz) + 6 bit fraction */ dig_rf_freq = rf_freq / MHz; temp = rf_freq % MHz; for (i = 0; i < 6; i++) { dig_rf_freq <<= 1; frac_divider /= 2; if (temp > frac_divider) { temp -= frac_divider; dig_rf_freq++; } } /* add to have shift center point by 7.8124 kHz */ if (temp > 7812) dig_rf_freq++; set_reg_bits(state->tab_rftune, 0x0d, 0xff, (u8) dig_rf_freq); set_reg_bits(state->tab_rftune, 0x0e, 0xff, (u8) (dig_rf_freq >> 8)); if (rf_freq >= 333000000) set_reg_bits(state->tab_rftune, 0x80, 0x40, 0x40); return state->tab_rftune; } /* ------------------------------------------------------------------------- */ static int mxl5007t_write_reg(struct mxl5007t_state *state, u8 reg, u8 val) { u8 buf[] = { reg, val }; struct i2c_msg msg = { .addr = state->i2c_props.addr, .flags = 0, .buf = buf, .len = 2 }; int ret; ret = i2c_transfer(state->i2c_props.adap, &msg, 1); if (ret != 1) { mxl_err("failed!"); return -EREMOTEIO; } return 0; } static int mxl5007t_write_regs(struct mxl5007t_state *state, struct reg_pair_t *reg_pair) { unsigned int i = 0; int ret = 0; while ((ret == 0) && (reg_pair[i].reg || reg_pair[i].val)) { ret = mxl5007t_write_reg(state, reg_pair[i].reg, reg_pair[i].val); i++; } return ret; } static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val) { struct i2c_msg msg[] = { { .addr = state->i2c_props.addr, .flags = 0, .buf = 
&reg, .len = 1 }, { .addr = state->i2c_props.addr, .flags = I2C_M_RD, .buf = val, .len = 1 }, }; int ret; ret = i2c_transfer(state->i2c_props.adap, msg, 2); if (ret != 2) { mxl_err("failed!"); return -EREMOTEIO; } return 0; } static int mxl5007t_soft_reset(struct mxl5007t_state *state) { u8 d = 0xff; struct i2c_msg msg = { .addr = state->i2c_props.addr, .flags = 0, .buf = &d, .len = 1 }; int ret = i2c_transfer(state->i2c_props.adap, &msg, 1); if (ret != 1) { mxl_err("failed!"); return -EREMOTEIO; } return 0; } static int mxl5007t_tuner_init(struct mxl5007t_state *state, enum mxl5007t_mode mode) { struct reg_pair_t *init_regs; int ret; ret = mxl5007t_soft_reset(state); if (mxl_fail(ret)) goto fail; /* calculate initialization reg array */ init_regs = mxl5007t_calc_init_regs(state, mode); ret = mxl5007t_write_regs(state, init_regs); if (mxl_fail(ret)) goto fail; mdelay(1); fail: return ret; } static int mxl5007t_tuner_rf_tune(struct mxl5007t_state *state, u32 rf_freq_hz, enum mxl5007t_bw_mhz bw) { struct reg_pair_t *rf_tune_regs; int ret; /* calculate channel change reg array */ rf_tune_regs = mxl5007t_calc_rf_tune_regs(state, rf_freq_hz, bw); ret = mxl5007t_write_regs(state, rf_tune_regs); if (mxl_fail(ret)) goto fail; msleep(3); fail: return ret; } /* ------------------------------------------------------------------------- */ static int mxl5007t_synth_lock_status(struct mxl5007t_state *state, int *rf_locked, int *ref_locked) { u8 d; int ret; *rf_locked = 0; *ref_locked = 0; ret = mxl5007t_read_reg(state, 0xd8, &d); if (mxl_fail(ret)) goto fail; if ((d & 0x0c) == 0x0c) *rf_locked = 1; if ((d & 0x03) == 0x03) *ref_locked = 1; fail: return ret; } /* ------------------------------------------------------------------------- */ static int mxl5007t_get_status(struct dvb_frontend *fe, u32 *status) { struct mxl5007t_state *state = fe->tuner_priv; int rf_locked, ref_locked, ret; *status = 0; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = 
mxl5007t_synth_lock_status(state, &rf_locked, &ref_locked); if (mxl_fail(ret)) goto fail; mxl_debug("%s%s", rf_locked ? "rf locked " : "", ref_locked ? "ref locked" : ""); if ((rf_locked) || (ref_locked)) *status |= TUNER_STATUS_LOCKED; fail: if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); return ret; } /* ------------------------------------------------------------------------- */ static int mxl5007t_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { struct mxl5007t_state *state = fe->tuner_priv; enum mxl5007t_bw_mhz bw; enum mxl5007t_mode mode; int ret; u32 freq = params->frequency; if (fe->ops.info.type == FE_ATSC) { switch (params->u.vsb.modulation) { case VSB_8: case VSB_16: mode = MxL_MODE_ATSC; break; case QAM_64: case QAM_256: mode = MxL_MODE_CABLE; break; default: mxl_err("modulation not set!"); return -EINVAL; } bw = MxL_BW_6MHz; } else if (fe->ops.info.type == FE_OFDM) { switch (params->u.ofdm.bandwidth) { case BANDWIDTH_6_MHZ: bw = MxL_BW_6MHz; break; case BANDWIDTH_7_MHZ: bw = MxL_BW_7MHz; break; case BANDWIDTH_8_MHZ: bw = MxL_BW_8MHz; break; default: mxl_err("bandwidth not set!"); return -EINVAL; } mode = MxL_MODE_DVBT; } else { mxl_err("modulation type not supported!"); return -EINVAL; } if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); mutex_lock(&state->lock); ret = mxl5007t_tuner_init(state, mode); if (mxl_fail(ret)) goto fail; ret = mxl5007t_tuner_rf_tune(state, freq, bw); if (mxl_fail(ret)) goto fail; state->frequency = freq; state->bandwidth = (fe->ops.info.type == FE_OFDM) ? 
params->u.ofdm.bandwidth : 0; fail: mutex_unlock(&state->lock); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); return ret; } /* ------------------------------------------------------------------------- */ static int mxl5007t_init(struct dvb_frontend *fe) { struct mxl5007t_state *state = fe->tuner_priv; int ret; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* wake from standby */ ret = mxl5007t_write_reg(state, 0x01, 0x01); mxl_fail(ret); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); return ret; } static int mxl5007t_sleep(struct dvb_frontend *fe) { struct mxl5007t_state *state = fe->tuner_priv; int ret; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* enter standby mode */ ret = mxl5007t_write_reg(state, 0x01, 0x00); mxl_fail(ret); ret = mxl5007t_write_reg(state, 0x0f, 0x00); mxl_fail(ret); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); return ret; } /* ------------------------------------------------------------------------- */ static int mxl5007t_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct mxl5007t_state *state = fe->tuner_priv; *frequency = state->frequency; return 0; } static int mxl5007t_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct mxl5007t_state *state = fe->tuner_priv; *bandwidth = state->bandwidth; return 0; } static int mxl5007t_release(struct dvb_frontend *fe) { struct mxl5007t_state *state = fe->tuner_priv; mutex_lock(&mxl5007t_list_mutex); if (state) hybrid_tuner_release_state(state); mutex_unlock(&mxl5007t_list_mutex); fe->tuner_priv = NULL; return 0; } /* ------------------------------------------------------------------------- */ static struct dvb_tuner_ops mxl5007t_tuner_ops = { .info = { .name = "MaxLinear MxL5007T", }, .init = mxl5007t_init, .sleep = mxl5007t_sleep, .set_params = mxl5007t_set_params, .get_status = mxl5007t_get_status, .get_frequency = mxl5007t_get_frequency, .get_bandwidth = mxl5007t_get_bandwidth, .release = mxl5007t_release, }; static 
int mxl5007t_get_chip_id(struct mxl5007t_state *state) { char *name; int ret; u8 id; ret = mxl5007t_read_reg(state, 0xd9, &id); if (mxl_fail(ret)) goto fail; switch (id) { case MxL_5007_V1_F1: name = "MxL5007.v1.f1"; break; case MxL_5007_V1_F2: name = "MxL5007.v1.f2"; break; case MxL_5007_V2_100_F1: name = "MxL5007.v2.100.f1"; break; case MxL_5007_V2_100_F2: name = "MxL5007.v2.100.f2"; break; case MxL_5007_V2_200_F1: name = "MxL5007.v2.200.f1"; break; case MxL_5007_V2_200_F2: name = "MxL5007.v2.200.f2"; break; case MxL_5007_V4: name = "MxL5007T.v4"; break; default: name = "MxL5007T"; printk(KERN_WARNING "%s: unknown rev (%02x)\n", __func__, id); id = MxL_UNKNOWN_ID; } state->chip_id = id; mxl_info("%s detected @ %d-%04x", name, i2c_adapter_id(state->i2c_props.adap), state->i2c_props.addr); return 0; fail: mxl_warn("unable to identify device @ %d-%04x", i2c_adapter_id(state->i2c_props.adap), state->i2c_props.addr); state->chip_id = MxL_UNKNOWN_ID; return ret; } struct dvb_frontend *mxl5007t_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, u8 addr, struct mxl5007t_config *cfg) { struct mxl5007t_state *state = NULL; int instance, ret; mutex_lock(&mxl5007t_list_mutex); instance = hybrid_tuner_request_state(struct mxl5007t_state, state, hybrid_tuner_instance_list, i2c, addr, "mxl5007t"); switch (instance) { case 0: goto fail; case 1: /* new tuner instance */ state->config = cfg; mutex_init(&state->lock); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = mxl5007t_get_chip_id(state); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); /* check return value of mxl5007t_get_chip_id */ if (mxl_fail(ret)) goto fail; break; default: /* existing tuner instance */ break; } fe->tuner_priv = state; mutex_unlock(&mxl5007t_list_mutex); memcpy(&fe->ops.tuner_ops, &mxl5007t_tuner_ops, sizeof(struct dvb_tuner_ops)); return fe; fail: mutex_unlock(&mxl5007t_list_mutex); mxl5007t_release(fe); return NULL; } EXPORT_SYMBOL_GPL(mxl5007t_attach); 
MODULE_DESCRIPTION("MaxLinear MxL5007T Silicon IC tuner driver"); MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.2"); /* * Overrides for Emacs so that we follow Linus's tabbing style. * --------------------------------------------------------------------------- * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
Shabbypenguin/Cayman-Island-Kernel
drivers/ide/ide-pnp.c
4614
2678
/* * This file provides autodetection for ISA PnP IDE interfaces. * It was tested with "ESS ES1868 Plug and Play AudioDrive" IDE interface. * * Copyright (C) 2000 Andrey Panin <pazke@donpac.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * You should have received a copy of the GNU General Public License * (for example /usr/src/linux/COPYING); if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/pnp.h> #include <linux/ide.h> #define DRV_NAME "ide-pnp" /* Add your devices here :)) */ static struct pnp_device_id idepnp_devices[] = { /* Generic ESDI/IDE/ATA compatible hard disk controller */ {.id = "PNP0600", .driver_data = 0}, {.id = ""} }; static const struct ide_port_info ide_pnp_port_info = { .host_flags = IDE_HFLAG_NO_DMA, .chipset = ide_generic, }; static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) { struct ide_host *host; unsigned long base, ctl; int rc; struct ide_hw hw, *hws[] = { &hw }; printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0))) return -1; base = pnp_port_start(dev, 0); ctl = pnp_port_start(dev, 1); if (!request_region(base, 8, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", DRV_NAME, base, base + 7); return -EBUSY; } if (!request_region(ctl, 1, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n", DRV_NAME, ctl); release_region(base, 8); return -EBUSY; } memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw, base, ctl); hw.irq = pnp_irq(dev, 0); rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host); if (rc) goto out; pnp_set_drvdata(dev, host); return 0; out: release_region(ctl, 1); release_region(base, 8); return rc; } 
static void idepnp_remove(struct pnp_dev *dev) { struct ide_host *host = pnp_get_drvdata(dev); ide_host_remove(host); release_region(pnp_port_start(dev, 1), 1); release_region(pnp_port_start(dev, 0), 8); } static struct pnp_driver idepnp_driver = { .name = "ide", .id_table = idepnp_devices, .probe = idepnp_probe, .remove = idepnp_remove, }; static int __init pnpide_init(void) { return pnp_register_driver(&idepnp_driver); } static void __exit pnpide_exit(void) { pnp_unregister_driver(&idepnp_driver); } module_init(pnpide_init); module_exit(pnpide_exit); MODULE_LICENSE("GPL");
gpl-2.0
tarunkapadia93/android_kernel_xiaomi_armani
fs/jfs/super.c
4614
21592
/* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/module.h> #include <linux/parser.h> #include <linux/completion.h> #include <linux/vfs.h> #include <linux/quotaops.h> #include <linux/mount.h> #include <linux/moduleparam.h> #include <linux/kthread.h> #include <linux/posix_acl.h> #include <linux/buffer_head.h> #include <linux/exportfs.h> #include <linux/crc32.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/seq_file.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_inode.h" #include "jfs_metapage.h" #include "jfs_superblock.h" #include "jfs_dmap.h" #include "jfs_imap.h" #include "jfs_acl.h" #include "jfs_debug.h" MODULE_DESCRIPTION("The Journaled Filesystem (JFS)"); MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM"); MODULE_LICENSE("GPL"); static struct kmem_cache * jfs_inode_cachep; static const struct super_operations jfs_super_operations; static const struct export_operations jfs_export_operations; static struct file_system_type jfs_fs_type; #define MAX_COMMIT_THREADS 64 static int commit_threads = 0; module_param(commit_threads, int, 0); MODULE_PARM_DESC(commit_threads, "Number of commit threads"); static struct task_struct 
*jfsCommitThread[MAX_COMMIT_THREADS]; struct task_struct *jfsIOthread; struct task_struct *jfsSyncThread; #ifdef CONFIG_JFS_DEBUG int jfsloglevel = JFS_LOGLEVEL_WARN; module_param(jfsloglevel, int, 0644); MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)"); #endif static void jfs_handle_error(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); if (sb->s_flags & MS_RDONLY) return; updateSuper(sb, FM_DIRTY); if (sbi->flag & JFS_ERR_PANIC) panic("JFS (device %s): panic forced after error\n", sb->s_id); else if (sbi->flag & JFS_ERR_REMOUNT_RO) { jfs_err("ERROR: (device %s): remounting filesystem " "as read-only\n", sb->s_id); sb->s_flags |= MS_RDONLY; } /* nothing is done for continue beyond marking the superblock dirty */ } void jfs_error(struct super_block *sb, const char * function, ...) { static char error_buf[256]; va_list args; va_start(args, function); vsnprintf(error_buf, sizeof(error_buf), function, args); va_end(args); printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf); jfs_handle_error(sb); } static struct inode *jfs_alloc_inode(struct super_block *sb) { struct jfs_inode_info *jfs_inode; jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS); if (!jfs_inode) return NULL; return &jfs_inode->vfs_inode; } static void jfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); struct jfs_inode_info *ji = JFS_IP(inode); kmem_cache_free(jfs_inode_cachep, ji); } static void jfs_destroy_inode(struct inode *inode) { struct jfs_inode_info *ji = JFS_IP(inode); BUG_ON(!list_empty(&ji->anon_inode_list)); spin_lock_irq(&ji->ag_lock); if (ji->active_ag != -1) { struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap; atomic_dec(&bmap->db_active[ji->active_ag]); ji->active_ag = -1; } spin_unlock_irq(&ji->ag_lock); call_rcu(&inode->i_rcu, jfs_i_callback); } static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb); s64 maxinodes; struct 
inomap *imap = JFS_IP(sbi->ipimap)->i_imap; jfs_info("In jfs_statfs"); buf->f_type = JFS_SUPER_MAGIC; buf->f_bsize = sbi->bsize; buf->f_blocks = sbi->bmap->db_mapsize; buf->f_bfree = sbi->bmap->db_nfree; buf->f_bavail = sbi->bmap->db_nfree; /* * If we really return the number of allocated & free inodes, some * applications will fail because they won't see enough free inodes. * We'll try to calculate some guess as to how may inodes we can * really allocate * * buf->f_files = atomic_read(&imap->im_numinos); * buf->f_ffree = atomic_read(&imap->im_numfree); */ maxinodes = min((s64) atomic_read(&imap->im_numinos) + ((sbi->bmap->db_nfree >> imap->im_l2nbperiext) << L2INOSPEREXT), (s64) 0xffffffffLL); buf->f_files = maxinodes; buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) - atomic_read(&imap->im_numfree)); buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2); buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2, sizeof(sbi->uuid)/2); buf->f_namelen = JFS_NAME_MAX; return 0; } static void jfs_put_super(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); int rc; jfs_info("In jfs_put_super"); dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); rc = jfs_umount(sb); if (rc) jfs_err("jfs_umount failed with return code %d", rc); unload_nls(sbi->nls_tab); truncate_inode_pages(sbi->direct_inode->i_mapping, 0); iput(sbi->direct_inode); kfree(sbi); } enum { Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota, Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask }; static const match_table_t tokens = { {Opt_integrity, "integrity"}, {Opt_nointegrity, "nointegrity"}, {Opt_iocharset, "iocharset=%s"}, {Opt_resize, "resize=%u"}, {Opt_resize_nosize, "resize"}, {Opt_errors, "errors=%s"}, {Opt_ignore, "noquota"}, {Opt_ignore, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_grpquota, "grpquota"}, {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_umask, 
"umask=%u"}, {Opt_err, NULL} }; static int parse_options(char *options, struct super_block *sb, s64 *newLVSize, int *flag) { void *nls_map = (void *)-1; /* -1: no change; NULL: none */ char *p; struct jfs_sb_info *sbi = JFS_SBI(sb); *newLVSize = 0; if (!options) return 1; while ((p = strsep(&options, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_integrity: *flag &= ~JFS_NOINTEGRITY; break; case Opt_nointegrity: *flag |= JFS_NOINTEGRITY; break; case Opt_ignore: /* Silently ignore the quota options */ /* Don't do anything ;-) */ break; case Opt_iocharset: if (nls_map && nls_map != (void *) -1) unload_nls(nls_map); if (!strcmp(args[0].from, "none")) nls_map = NULL; else { nls_map = load_nls(args[0].from); if (!nls_map) { printk(KERN_ERR "JFS: charset not found\n"); goto cleanup; } } break; case Opt_resize: { char *resize = args[0].from; *newLVSize = simple_strtoull(resize, &resize, 0); break; } case Opt_resize_nosize: { *newLVSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; if (*newLVSize == 0) printk(KERN_ERR "JFS: Cannot determine volume size\n"); break; } case Opt_errors: { char *errors = args[0].from; if (!errors || !*errors) goto cleanup; if (!strcmp(errors, "continue")) { *flag &= ~JFS_ERR_REMOUNT_RO; *flag &= ~JFS_ERR_PANIC; *flag |= JFS_ERR_CONTINUE; } else if (!strcmp(errors, "remount-ro")) { *flag &= ~JFS_ERR_CONTINUE; *flag &= ~JFS_ERR_PANIC; *flag |= JFS_ERR_REMOUNT_RO; } else if (!strcmp(errors, "panic")) { *flag &= ~JFS_ERR_CONTINUE; *flag &= ~JFS_ERR_REMOUNT_RO; *flag |= JFS_ERR_PANIC; } else { printk(KERN_ERR "JFS: %s is an invalid error handler\n", errors); goto cleanup; } break; } #ifdef CONFIG_QUOTA case Opt_quota: case Opt_usrquota: *flag |= JFS_USRQUOTA; break; case Opt_grpquota: *flag |= JFS_GRPQUOTA; break; #else case Opt_usrquota: case Opt_grpquota: case Opt_quota: printk(KERN_ERR "JFS: quota operations not supported\n"); break; #endif 
case Opt_uid: { char *uid = args[0].from; sbi->uid = simple_strtoul(uid, &uid, 0); break; } case Opt_gid: { char *gid = args[0].from; sbi->gid = simple_strtoul(gid, &gid, 0); break; } case Opt_umask: { char *umask = args[0].from; sbi->umask = simple_strtoul(umask, &umask, 8); if (sbi->umask & ~0777) { printk(KERN_ERR "JFS: Invalid value of umask\n"); goto cleanup; } break; } default: printk("jfs: Unrecognized mount option \"%s\" " " or missing value\n", p); goto cleanup; } } if (nls_map != (void *) -1) { /* Discard old (if remount) */ unload_nls(sbi->nls_tab); sbi->nls_tab = nls_map; } return 1; cleanup: if (nls_map && nls_map != (void *) -1) unload_nls(nls_map); return 0; } static int jfs_remount(struct super_block *sb, int *flags, char *data) { s64 newLVSize = 0; int rc = 0; int flag = JFS_SBI(sb)->flag; int ret; if (!parse_options(data, sb, &newLVSize, &flag)) { return -EINVAL; } if (newLVSize) { if (sb->s_flags & MS_RDONLY) { printk(KERN_ERR "JFS: resize requires volume to be mounted read-write\n"); return -EROFS; } rc = jfs_extendfs(sb, newLVSize, 0); if (rc) return rc; } if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { /* * Invalidate any previously read metadata. 
fsck may have * changed the on-disk data since we mounted r/o */ truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0); JFS_SBI(sb)->flag = flag; ret = jfs_mount_rw(sb, 1); /* mark the fs r/w for quota activity */ sb->s_flags &= ~MS_RDONLY; dquot_resume(sb, -1); return ret; } if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) { rc = dquot_suspend(sb, -1); if (rc < 0) { return rc; } rc = jfs_umount_rw(sb); JFS_SBI(sb)->flag = flag; return rc; } if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY)) if (!(sb->s_flags & MS_RDONLY)) { rc = jfs_umount_rw(sb); if (rc) return rc; JFS_SBI(sb)->flag = flag; ret = jfs_mount_rw(sb, 1); return ret; } JFS_SBI(sb)->flag = flag; return 0; } static int jfs_fill_super(struct super_block *sb, void *data, int silent) { struct jfs_sb_info *sbi; struct inode *inode; int rc; s64 newLVSize = 0; int flag, ret = -EINVAL; jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags); if (!new_valid_dev(sb->s_bdev->bd_dev)) return -EOVERFLOW; sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; sb->s_max_links = JFS_LINK_MAX; sbi->sb = sb; sbi->uid = sbi->gid = sbi->umask = -1; /* initialize the mount flag and determine the default error handler */ flag = JFS_ERR_REMOUNT_RO; if (!parse_options((char *) data, sb, &newLVSize, &flag)) goto out_kfree; sbi->flag = flag; #ifdef CONFIG_JFS_POSIX_ACL sb->s_flags |= MS_POSIXACL; #endif if (newLVSize) { printk(KERN_ERR "resize option for remount only\n"); goto out_kfree; } /* * Initialize blocksize to 4K. */ sb_set_blocksize(sb, PSIZE); /* * Set method vectors. 
*/ sb->s_op = &jfs_super_operations; sb->s_export_op = &jfs_export_operations; #ifdef CONFIG_QUOTA sb->dq_op = &dquot_operations; sb->s_qcop = &dquot_quotactl_ops; #endif /* * Initialize direct-mapping inode/address-space */ inode = new_inode(sb); if (inode == NULL) { ret = -ENOMEM; goto out_unload; } inode->i_ino = 0; inode->i_size = sb->s_bdev->bd_inode->i_size; inode->i_mapping->a_ops = &jfs_metapage_aops; insert_inode_hash(inode); mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); sbi->direct_inode = inode; rc = jfs_mount(sb); if (rc) { if (!silent) { jfs_err("jfs_mount failed w/return code = %d", rc); } goto out_mount_failed; } if (sb->s_flags & MS_RDONLY) sbi->log = NULL; else { rc = jfs_mount_rw(sb, 0); if (rc) { if (!silent) { jfs_err("jfs_mount_rw failed, return code = %d", rc); } goto out_no_rw; } } sb->s_magic = JFS_SUPER_MAGIC; if (sbi->mntflag & JFS_OS2) sb->s_d_op = &jfs_ci_dentry_operations; inode = jfs_iget(sb, ROOT_I); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto out_no_rw; } sb->s_root = d_make_root(inode); if (!sb->s_root) goto out_no_root; /* logical blocks are represented by 40 bits in pxd_t, etc. */ sb->s_maxbytes = ((u64) sb->s_blocksize) << 40; #if BITS_PER_LONG == 32 /* * Page cache is indexed by long. 
* I would use MAX_LFS_FILESIZE, but it's only half as big */ sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, (u64)sb->s_maxbytes); #endif sb->s_time_gran = 1; return 0; out_no_root: jfs_err("jfs_read_super: get root dentry failed"); out_no_rw: rc = jfs_umount(sb); if (rc) { jfs_err("jfs_umount failed with return code %d", rc); } out_mount_failed: filemap_write_and_wait(sbi->direct_inode->i_mapping); truncate_inode_pages(sbi->direct_inode->i_mapping, 0); make_bad_inode(sbi->direct_inode); iput(sbi->direct_inode); sbi->direct_inode = NULL; out_unload: if (sbi->nls_tab) unload_nls(sbi->nls_tab); out_kfree: kfree(sbi); return ret; } static int jfs_freeze(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); struct jfs_log *log = sbi->log; if (!(sb->s_flags & MS_RDONLY)) { txQuiesce(sb); lmLogShutdown(log); updateSuper(sb, FM_CLEAN); } return 0; } static int jfs_unfreeze(struct super_block *sb) { struct jfs_sb_info *sbi = JFS_SBI(sb); struct jfs_log *log = sbi->log; int rc = 0; if (!(sb->s_flags & MS_RDONLY)) { updateSuper(sb, FM_MOUNT); if ((rc = lmLogInit(log))) jfs_err("jfs_unlock failed with return code %d", rc); else txResume(sb); } return 0; } static struct dentry *jfs_do_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super); } static int jfs_sync_fs(struct super_block *sb, int wait) { struct jfs_log *log = JFS_SBI(sb)->log; /* log == NULL indicates read-only mount */ if (log) { jfs_flush_journal(log, wait); jfs_syncpt(log, 0); } return 0; } static int jfs_show_options(struct seq_file *seq, struct dentry *root) { struct jfs_sb_info *sbi = JFS_SBI(root->d_sb); if (sbi->uid != -1) seq_printf(seq, ",uid=%d", sbi->uid); if (sbi->gid != -1) seq_printf(seq, ",gid=%d", sbi->gid); if (sbi->umask != -1) seq_printf(seq, ",umask=%03o", sbi->umask); if (sbi->flag & JFS_NOINTEGRITY) seq_puts(seq, ",nointegrity"); if (sbi->nls_tab) seq_printf(seq, 
",iocharset=%s", sbi->nls_tab->charset); if (sbi->flag & JFS_ERR_CONTINUE) seq_printf(seq, ",errors=continue"); if (sbi->flag & JFS_ERR_PANIC) seq_printf(seq, ",errors=panic"); #ifdef CONFIG_QUOTA if (sbi->flag & JFS_USRQUOTA) seq_puts(seq, ",usrquota"); if (sbi->flag & JFS_GRPQUOTA) seq_puts(seq, ",grpquota"); #endif return 0; } #ifdef CONFIG_QUOTA /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> sb->s_blocksize_bits; int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head tmp_bh; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; tmp_bh.b_state = 0; tmp_bh.b_size = 1 << inode->i_blkbits; err = jfs_get_block(inode, blk, &tmp_bh, 0); if (err) return err; if (!buffer_mapped(&tmp_bh)) /* A hole? 
*/ memset(data, 0, tocopy); else { bh = sb_bread(sb, tmp_bh.b_blocknr); if (!bh) return -EIO; memcpy(data, bh->b_data+offset, tocopy); brelse(bh); } offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile */ static ssize_t jfs_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> sb->s_blocksize_bits; int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t towrite = len; struct buffer_head tmp_bh; struct buffer_head *bh; mutex_lock(&inode->i_mutex); while (towrite > 0) { tocopy = sb->s_blocksize - offset < towrite ? sb->s_blocksize - offset : towrite; tmp_bh.b_state = 0; tmp_bh.b_size = 1 << inode->i_blkbits; err = jfs_get_block(inode, blk, &tmp_bh, 1); if (err) goto out; if (offset || tocopy != sb->s_blocksize) bh = sb_bread(sb, tmp_bh.b_blocknr); else bh = sb_getblk(sb, tmp_bh.b_blocknr); if (!bh) { err = -EIO; goto out; } lock_buffer(bh); memcpy(bh->b_data+offset, data, tocopy); flush_dcache_page(bh->b_page); set_buffer_uptodate(bh); mark_buffer_dirty(bh); unlock_buffer(bh); brelse(bh); offset = 0; towrite -= tocopy; data += tocopy; blk++; } out: if (len == towrite) { mutex_unlock(&inode->i_mutex); return err; } if (inode->i_size < off+len-towrite) i_size_write(inode, off+len-towrite); inode->i_version++; inode->i_mtime = inode->i_ctime = CURRENT_TIME; mark_inode_dirty(inode); mutex_unlock(&inode->i_mutex); return len - towrite; } #endif static const struct super_operations jfs_super_operations = { .alloc_inode = jfs_alloc_inode, .destroy_inode = jfs_destroy_inode, .dirty_inode = jfs_dirty_inode, .write_inode = jfs_write_inode, .evict_inode = jfs_evict_inode, .put_super = jfs_put_super, .sync_fs = jfs_sync_fs, .freeze_fs = jfs_freeze, .unfreeze_fs = jfs_unfreeze, .statfs = jfs_statfs, .remount_fs = jfs_remount, .show_options = jfs_show_options, #ifdef CONFIG_QUOTA .quota_read = jfs_quota_read, 
.quota_write = jfs_quota_write, #endif }; static const struct export_operations jfs_export_operations = { .fh_to_dentry = jfs_fh_to_dentry, .fh_to_parent = jfs_fh_to_parent, .get_parent = jfs_get_parent, }; static struct file_system_type jfs_fs_type = { .owner = THIS_MODULE, .name = "jfs", .mount = jfs_do_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, }; static void init_once(void *foo) { struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; memset(jfs_ip, 0, sizeof(struct jfs_inode_info)); INIT_LIST_HEAD(&jfs_ip->anon_inode_list); init_rwsem(&jfs_ip->rdwrlock); mutex_init(&jfs_ip->commit_mutex); init_rwsem(&jfs_ip->xattr_sem); spin_lock_init(&jfs_ip->ag_lock); jfs_ip->active_ag = -1; inode_init_once(&jfs_ip->vfs_inode); } static int __init init_jfs_fs(void) { int i; int rc; jfs_inode_cachep = kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, init_once); if (jfs_inode_cachep == NULL) return -ENOMEM; /* * Metapage initialization */ rc = metapage_init(); if (rc) { jfs_err("metapage_init failed w/rc = %d", rc); goto free_slab; } /* * Transaction Manager initialization */ rc = txInit(); if (rc) { jfs_err("txInit failed w/rc = %d", rc); goto free_metapage; } /* * I/O completion thread (endio) */ jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO"); if (IS_ERR(jfsIOthread)) { rc = PTR_ERR(jfsIOthread); jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); goto end_txmngr; } if (commit_threads < 1) commit_threads = num_online_cpus(); if (commit_threads > MAX_COMMIT_THREADS) commit_threads = MAX_COMMIT_THREADS; for (i = 0; i < commit_threads; i++) { jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit"); if (IS_ERR(jfsCommitThread[i])) { rc = PTR_ERR(jfsCommitThread[i]); jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); commit_threads = i; goto kill_committask; } } jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync"); if (IS_ERR(jfsSyncThread)) { rc = PTR_ERR(jfsSyncThread); 
jfs_err("init_jfs_fs: fork failed w/rc = %d", rc); goto kill_committask; } #ifdef PROC_FS_JFS jfs_proc_init(); #endif rc = register_filesystem(&jfs_fs_type); if (!rc) return 0; #ifdef PROC_FS_JFS jfs_proc_clean(); #endif kthread_stop(jfsSyncThread); kill_committask: for (i = 0; i < commit_threads; i++) kthread_stop(jfsCommitThread[i]); kthread_stop(jfsIOthread); end_txmngr: txExit(); free_metapage: metapage_exit(); free_slab: kmem_cache_destroy(jfs_inode_cachep); return rc; } static void __exit exit_jfs_fs(void) { int i; jfs_info("exit_jfs_fs called"); txExit(); metapage_exit(); kthread_stop(jfsIOthread); for (i = 0; i < commit_threads; i++) kthread_stop(jfsCommitThread[i]); kthread_stop(jfsSyncThread); #ifdef PROC_FS_JFS jfs_proc_clean(); #endif unregister_filesystem(&jfs_fs_type); kmem_cache_destroy(jfs_inode_cachep); } module_init(init_jfs_fs) module_exit(exit_jfs_fs)
gpl-2.0
jamison904/Nexus7_kernel
arch/tile/lib/cacheflush.c
4614
5545
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <asm/page.h> #include <asm/cacheflush.h> #include <arch/icache.h> #include <arch/spr_def.h> void __flush_icache_range(unsigned long start, unsigned long end) { invalidate_icache((const void *)start, end - start, PAGE_SIZE); } /* Force a load instruction to issue. */ static inline void force_load(char *p) { *(volatile char *)p; } /* * Flush and invalidate a VA range that is homed remotely on a single * core (if "!hfh") or homed via hash-for-home (if "hfh"), waiting * until the memory controller holds the flushed values. */ void finv_buffer_remote(void *buffer, size_t size, int hfh) { char *p, *base; size_t step_size, load_count; /* * On TILEPro the striping granularity is a fixed 8KB; on * TILE-Gx it is configurable, and we rely on the fact that * the hypervisor always configures maximum striping, so that * bits 9 and 10 of the PA are part of the stripe function, so * every 512 bytes we hit a striping boundary. * */ #ifdef __tilegx__ const unsigned long STRIPE_WIDTH = 512; #else const unsigned long STRIPE_WIDTH = 8192; #endif #ifdef __tilegx__ /* * On TILE-Gx, we must disable the dstream prefetcher before doing * a cache flush; otherwise, we could end up with data in the cache * that we don't want there. Note that normally we'd do an mf * after the SPR write to disabling the prefetcher, but we do one * below, before any further loads, so there's no need to do it * here. 
*/ uint_reg_t old_dstream_pf = __insn_mfspr(SPR_DSTREAM_PF); __insn_mtspr(SPR_DSTREAM_PF, 0); #endif /* * Flush and invalidate the buffer out of the local L1/L2 * and request the home cache to flush and invalidate as well. */ __finv_buffer(buffer, size); /* * Wait for the home cache to acknowledge that it has processed * all the flush-and-invalidate requests. This does not mean * that the flushed data has reached the memory controller yet, * but it does mean the home cache is processing the flushes. */ __insn_mf(); /* * Issue a load to the last cache line, which can't complete * until all the previously-issued flushes to the same memory * controller have also completed. If we weren't striping * memory, that one load would be sufficient, but since we may * be, we also need to back up to the last load issued to * another memory controller, which would be the point where * we crossed a "striping" boundary (the granularity of striping * across memory controllers). Keep backing up and doing this * until we are before the beginning of the buffer, or have * hit all the controllers. * * If we are flushing a hash-for-home buffer, it's even worse. * Each line may be homed on a different tile, and each tile * may have up to four lines that are on different * controllers. So as we walk backwards, we have to touch * enough cache lines to satisfy these constraints. In * practice this ends up being close enough to "load from * every cache line on a full memory stripe on each * controller" that we simply do that, to simplify the logic. * * On TILE-Gx the hash-for-home function is much more complex, * with the upshot being we can't readily guarantee we have * hit both entries in the 128-entry AMT that were hit by any * load in the entire range, so we just re-load them all. * With larger buffers, we may want to consider using a hypervisor * trap to issue loads directly to each hash-for-home tile for * each controller (doing it from Linux would trash the TLB). 
*/ if (hfh) { step_size = L2_CACHE_BYTES; #ifdef __tilegx__ load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES; #else load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) * (1 << CHIP_LOG_NUM_MSHIMS()); #endif } else { step_size = STRIPE_WIDTH; load_count = (1 << CHIP_LOG_NUM_MSHIMS()); } /* Load the last byte of the buffer. */ p = (char *)buffer + size - 1; force_load(p); /* Bump down to the end of the previous stripe or cache line. */ p -= step_size; p = (char *)((unsigned long)p | (step_size - 1)); /* Figure out how far back we need to go. */ base = p - (step_size * (load_count - 2)); if ((unsigned long)base < (unsigned long)buffer) base = buffer; /* * Fire all the loads we need. The MAF only has eight entries * so we can have at most eight outstanding loads, so we * unroll by that amount. */ #pragma unroll 8 for (; p >= base; p -= step_size) force_load(p); /* * Repeat, but with inv's instead of loads, to get rid of the * data we just loaded into our own cache and the old home L3. * No need to unroll since inv's don't target a register. */ p = (char *)buffer + size - 1; __insn_inv(p); p -= step_size; p = (char *)((unsigned long)p | (step_size - 1)); for (; p >= base; p -= step_size) __insn_inv(p); /* Wait for the load+inv's (and thus finvs) to have completed. */ __insn_mf(); #ifdef __tilegx__ /* Reenable the prefetcher. */ __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf); #endif }
gpl-2.0
kaber/nf-next-ipv6-nat
drivers/char/rtc.c
4870
34303
/* * Real Time Clock interface for Linux * * Copyright (C) 1996 Paul Gortmaker * * This driver allows use of the real time clock (built into * nearly all computers) from user space. It exports the /dev/rtc * interface supporting various ioctl() and also the * /proc/driver/rtc pseudo-file for status information. * * The ioctls can be used to set the interrupt behaviour and * generation rate from the RTC via IRQ 8. Then the /dev/rtc * interface can be used to make use of these timer interrupts, * be they interval or alarm based. * * The /dev/rtc interface will block on reads until an interrupt * has been received. If a RTC interrupt has already happened, * it will output an unsigned long and then block. The output value * contains the interrupt status in the low byte and the number of * interrupts since the last read in the remaining high bytes. The * /dev/rtc interface can also be used with the select(2) call. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Based on other minimal char device drivers, like Alan's * watchdog, Ted's random, etc. etc. * * 1.07 Paul Gortmaker. * 1.08 Miquel van Smoorenburg: disallow certain things on the * DEC Alpha as the CMOS clock is also used for other things. * 1.09 Nikita Schmidt: epoch support and some Alpha cleanup. * 1.09a Pete Zaitcev: Sun SPARC * 1.09b Jeff Garzik: Modularize, init cleanup * 1.09c Jeff Garzik: SMP cleanup * 1.10 Paul Barton-Davis: add support for async I/O * 1.10a Andrea Arcangeli: Alpha updates * 1.10b Andrew Morton: SMP lock fix * 1.10c Cesar Barros: SMP locking fixes and cleanup * 1.10d Paul Gortmaker: delete paranoia check in rtc_exit * 1.10e Maciej W. Rozycki: Handle DECstation's year weirdness. 
* 1.11 Takashi Iwai: Kernel access functions * rtc_register/rtc_unregister/rtc_control * 1.11a Daniele Bellucci: Audit create_proc_read_entry in rtc_init * 1.12 Venkatesh Pallipadi: Hooks for emulating rtc on HPET base-timer * CONFIG_HPET_EMULATE_RTC * 1.12a Maciej W. Rozycki: Handle memory-mapped chips properly. * 1.12ac Alan Cox: Allow read access to the day of week register * 1.12b David John: Remove calls to the BKL. */ #define RTC_VERSION "1.12b" /* * Note that *all* calls to CMOS_READ and CMOS_WRITE are done with * interrupts disabled. Due to the index-port/data-port (0x70/0x71) * design of the RTC, we don't want two different things trying to * get to it at once. (e.g. the periodic 11 min sync from * kernel/time/ntp.c vs. this driver.) */ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/ioport.h> #include <linux/fcntl.h> #include <linux/mc146818rtc.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/sysctl.h> #include <linux/wait.h> #include <linux/bcd.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/ratelimit.h> #include <asm/current.h> #ifdef CONFIG_X86 #include <asm/hpet.h> #endif #ifdef CONFIG_SPARC32 #include <linux/of.h> #include <linux/of_device.h> #include <asm/io.h> static unsigned long rtc_port; static int rtc_irq; #endif #ifdef CONFIG_HPET_EMULATE_RTC #undef RTC_IRQ #endif #ifdef RTC_IRQ static int rtc_has_irq = 1; #endif #ifndef CONFIG_HPET_EMULATE_RTC #define is_hpet_enabled() 0 #define hpet_set_alarm_time(hrs, min, sec) 0 #define hpet_set_periodic_freq(arg) 0 #define hpet_mask_rtc_irq_bit(arg) 0 #define hpet_set_rtc_irq_bit(arg) 0 #define hpet_rtc_timer_init() do { } while (0) #define hpet_rtc_dropped_irq() 0 #define hpet_register_irq_handler(h) ({ 0; }) #define hpet_unregister_irq_handler(h) ({ 0; }) 
#ifdef RTC_IRQ static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) { return 0; } #endif #endif /* * We sponge a minor off of the misc major. No need slurping * up another valuable major dev number for this. If you add * an ioctl, make sure you don't conflict with SPARC's RTC * ioctls. */ static struct fasync_struct *rtc_async_queue; static DECLARE_WAIT_QUEUE_HEAD(rtc_wait); #ifdef RTC_IRQ static void rtc_dropped_irq(unsigned long data); static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0); #endif static ssize_t rtc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos); static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static void rtc_get_rtc_time(struct rtc_time *rtc_tm); #ifdef RTC_IRQ static unsigned int rtc_poll(struct file *file, poll_table *wait); #endif static void get_rtc_alm_time(struct rtc_time *alm_tm); #ifdef RTC_IRQ static void set_rtc_irq_bit_locked(unsigned char bit); static void mask_rtc_irq_bit_locked(unsigned char bit); static inline void set_rtc_irq_bit(unsigned char bit) { spin_lock_irq(&rtc_lock); set_rtc_irq_bit_locked(bit); spin_unlock_irq(&rtc_lock); } static void mask_rtc_irq_bit(unsigned char bit) { spin_lock_irq(&rtc_lock); mask_rtc_irq_bit_locked(bit); spin_unlock_irq(&rtc_lock); } #endif #ifdef CONFIG_PROC_FS static int rtc_proc_open(struct inode *inode, struct file *file); #endif /* * Bits in rtc_status. (6 bits of room for future expansion) */ #define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ #define RTC_TIMER_ON 0x02 /* missed irq timer active */ /* * rtc_status is never changed by rtc_interrupt, and ioctl/open/close is * protected by the spin lock rtc_lock. 
However, ioctl can still disable the * timer in rtc_status and then with del_timer after the interrupt has read * rtc_status but before mod_timer is called, which would then reenable the * timer (but you would need to have an awful timing before you'd trip on it) */ static unsigned long rtc_status; /* bitmapped status byte. */ static unsigned long rtc_freq; /* Current periodic IRQ rate */ static unsigned long rtc_irq_data; /* our output to the world */ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */ #ifdef RTC_IRQ /* * rtc_task_lock nests inside rtc_lock. */ static DEFINE_SPINLOCK(rtc_task_lock); static rtc_task_t *rtc_callback; #endif /* * If this driver ever becomes modularised, it will be really nice * to make the epoch retain its value across module reload... */ static unsigned long epoch = 1900; /* year corresponding to 0x00 */ static const unsigned char days_in_mo[] = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; /* * Returns true if a clock update is in progress */ static inline unsigned char rtc_is_updating(void) { unsigned long flags; unsigned char uip; spin_lock_irqsave(&rtc_lock, flags); uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); spin_unlock_irqrestore(&rtc_lock, flags); return uip; } #ifdef RTC_IRQ /* * A very tiny interrupt handler. It runs with IRQF_DISABLED set, * but there is possibility of conflicting with the set_rtc_mmss() * call (the rtc irq and the timer irq can easily run at the same * time in two different CPUs). So we need to serialize * accesses to the chip with the rtc_lock spinlock that each * architecture should implement in the timer code. * (See ./arch/XXXX/kernel/time.c for the set_rtc_mmss() function.) */ static irqreturn_t rtc_interrupt(int irq, void *dev_id) { /* * Can be an alarm interrupt, update complete interrupt, * or a periodic interrupt. We store the status in the * low byte and the number of interrupts received since * the last read in the remainder of rtc_irq_data. 
*/ spin_lock(&rtc_lock); rtc_irq_data += 0x100; rtc_irq_data &= ~0xff; if (is_hpet_enabled()) { /* * In this case it is HPET RTC interrupt handler * calling us, with the interrupt information * passed as arg1, instead of irq. */ rtc_irq_data |= (unsigned long)irq & 0xF0; } else { rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); } if (rtc_status & RTC_TIMER_ON) mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); spin_unlock(&rtc_lock); /* Now do the rest of the actions */ spin_lock(&rtc_task_lock); if (rtc_callback) rtc_callback->func(rtc_callback->private_data); spin_unlock(&rtc_task_lock); wake_up_interruptible(&rtc_wait); kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); return IRQ_HANDLED; } #endif /* * sysctl-tuning infrastructure. */ static ctl_table rtc_table[] = { { .procname = "max-user-freq", .data = &rtc_max_user_freq, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; static ctl_table rtc_root[] = { { .procname = "rtc", .mode = 0555, .child = rtc_table, }, { } }; static ctl_table dev_root[] = { { .procname = "dev", .mode = 0555, .child = rtc_root, }, { } }; static struct ctl_table_header *sysctl_header; static int __init init_sysctl(void) { sysctl_header = register_sysctl_table(dev_root); return 0; } static void __exit cleanup_sysctl(void) { unregister_sysctl_table(sysctl_header); } /* * Now all the various file operations that we export. */ static ssize_t rtc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { #ifndef RTC_IRQ return -EIO; #else DECLARE_WAITQUEUE(wait, current); unsigned long data; ssize_t retval; if (rtc_has_irq == 0) return -EIO; /* * Historically this function used to assume that sizeof(unsigned long) * is the same in userspace and kernelspace. This lead to problems * for configurations with multiple ABIs such a the MIPS o32 and 64 * ABIs supported on the same kernel. 
So now we support read of both * 4 and 8 bytes and assume that's the sizeof(unsigned long) in the * userspace ABI. */ if (count != sizeof(unsigned int) && count != sizeof(unsigned long)) return -EINVAL; add_wait_queue(&rtc_wait, &wait); do { /* First make it right. Then make it fast. Putting this whole * block within the parentheses of a while would be too * confusing. And no, xchg() is not the answer. */ __set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&rtc_lock); data = rtc_irq_data; rtc_irq_data = 0; spin_unlock_irq(&rtc_lock); if (data != 0) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto out; } if (signal_pending(current)) { retval = -ERESTARTSYS; goto out; } schedule(); } while (1); if (count == sizeof(unsigned int)) { retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int); } else { retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long); } if (!retval) retval = count; out: __set_current_state(TASK_RUNNING); remove_wait_queue(&rtc_wait, &wait); return retval; #endif } static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) { struct rtc_time wtime; #ifdef RTC_IRQ if (rtc_has_irq == 0) { switch (cmd) { case RTC_AIE_OFF: case RTC_AIE_ON: case RTC_PIE_OFF: case RTC_PIE_ON: case RTC_UIE_OFF: case RTC_UIE_ON: case RTC_IRQP_READ: case RTC_IRQP_SET: return -EINVAL; }; } #endif switch (cmd) { #ifdef RTC_IRQ case RTC_AIE_OFF: /* Mask alarm int. enab. bit */ { mask_rtc_irq_bit(RTC_AIE); return 0; } case RTC_AIE_ON: /* Allow alarm interrupts. */ { set_rtc_irq_bit(RTC_AIE); return 0; } case RTC_PIE_OFF: /* Mask periodic int. enab. 
bit */ { /* can be called from isr via rtc_control() */ unsigned long flags; spin_lock_irqsave(&rtc_lock, flags); mask_rtc_irq_bit_locked(RTC_PIE); if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } spin_unlock_irqrestore(&rtc_lock, flags); return 0; } case RTC_PIE_ON: /* Allow periodic ints */ { /* can be called from isr via rtc_control() */ unsigned long flags; /* * We don't really want Joe User enabling more * than 64Hz of interrupts on a multi-user machine. */ if (!kernel && (rtc_freq > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE))) return -EACCES; spin_lock_irqsave(&rtc_lock, flags); if (!(rtc_status & RTC_TIMER_ON)) { mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); rtc_status |= RTC_TIMER_ON; } set_rtc_irq_bit_locked(RTC_PIE); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } case RTC_UIE_OFF: /* Mask ints from RTC updates. */ { mask_rtc_irq_bit(RTC_UIE); return 0; } case RTC_UIE_ON: /* Allow ints for RTC updates. */ { set_rtc_irq_bit(RTC_UIE); return 0; } #endif case RTC_ALM_READ: /* Read the present alarm time */ { /* * This returns a struct rtc_time. Reading >= 0xc0 * means "don't care" or "match all". Only the tm_hour, * tm_min, and tm_sec values are filled in. */ memset(&wtime, 0, sizeof(struct rtc_time)); get_rtc_alm_time(&wtime); break; } case RTC_ALM_SET: /* Store a time into the alarm */ { /* * This expects a struct rtc_time. Writing 0xff means * "don't care" or "match all". Only the tm_hour, * tm_min and tm_sec are used. 
*/ unsigned char hrs, min, sec; struct rtc_time alm_tm; if (copy_from_user(&alm_tm, (struct rtc_time __user *)arg, sizeof(struct rtc_time))) return -EFAULT; hrs = alm_tm.tm_hour; min = alm_tm.tm_min; sec = alm_tm.tm_sec; spin_lock_irq(&rtc_lock); if (hpet_set_alarm_time(hrs, min, sec)) { /* * Fallthru and set alarm time in CMOS too, * so that we will get proper value in RTC_ALM_READ */ } if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { if (sec < 60) sec = bin2bcd(sec); else sec = 0xff; if (min < 60) min = bin2bcd(min); else min = 0xff; if (hrs < 24) hrs = bin2bcd(hrs); else hrs = 0xff; } CMOS_WRITE(hrs, RTC_HOURS_ALARM); CMOS_WRITE(min, RTC_MINUTES_ALARM); CMOS_WRITE(sec, RTC_SECONDS_ALARM); spin_unlock_irq(&rtc_lock); return 0; } case RTC_RD_TIME: /* Read the time/date from RTC */ { memset(&wtime, 0, sizeof(struct rtc_time)); rtc_get_rtc_time(&wtime); break; } case RTC_SET_TIME: /* Set the RTC */ { struct rtc_time rtc_tm; unsigned char mon, day, hrs, min, sec, leap_yr; unsigned char save_control, save_freq_select; unsigned int yrs; #ifdef CONFIG_MACH_DECSTATION unsigned int real_yrs; #endif if (!capable(CAP_SYS_TIME)) return -EACCES; if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg, sizeof(struct rtc_time))) return -EFAULT; yrs = rtc_tm.tm_year + 1900; mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */ day = rtc_tm.tm_mday; hrs = rtc_tm.tm_hour; min = rtc_tm.tm_min; sec = rtc_tm.tm_sec; if (yrs < 1970) return -EINVAL; leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400)); if ((mon > 12) || (day == 0)) return -EINVAL; if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) return -EINVAL; if ((hrs >= 24) || (min >= 60) || (sec >= 60)) return -EINVAL; yrs -= epoch; if (yrs > 255) /* They are unsigned */ return -EINVAL; spin_lock_irq(&rtc_lock); #ifdef CONFIG_MACH_DECSTATION real_yrs = yrs; yrs = 72; /* * We want to keep the year set to 73 until March * for non-leap years, so that Feb, 29th is handled * correctly. 
*/ if (!leap_yr && mon < 3) { real_yrs--; yrs = 73; } #endif /* These limits and adjustments are independent of * whether the chip is in binary mode or not. */ if (yrs > 169) { spin_unlock_irq(&rtc_lock); return -EINVAL; } if (yrs >= 100) yrs -= 100; if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { sec = bin2bcd(sec); min = bin2bcd(min); hrs = bin2bcd(hrs); day = bin2bcd(day); mon = bin2bcd(mon); yrs = bin2bcd(yrs); } save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); save_freq_select = CMOS_READ(RTC_FREQ_SELECT); CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); #endif CMOS_WRITE(yrs, RTC_YEAR); CMOS_WRITE(mon, RTC_MONTH); CMOS_WRITE(day, RTC_DAY_OF_MONTH); CMOS_WRITE(hrs, RTC_HOURS); CMOS_WRITE(min, RTC_MINUTES); CMOS_WRITE(sec, RTC_SECONDS); CMOS_WRITE(save_control, RTC_CONTROL); CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); spin_unlock_irq(&rtc_lock); return 0; } #ifdef RTC_IRQ case RTC_IRQP_READ: /* Read the periodic IRQ rate. */ { return put_user(rtc_freq, (unsigned long __user *)arg); } case RTC_IRQP_SET: /* Set periodic IRQ rate. */ { int tmp = 0; unsigned char val; /* can be called from isr via rtc_control() */ unsigned long flags; /* * The max we can do is 8192Hz. */ if ((arg < 2) || (arg > 8192)) return -EINVAL; /* * We don't really want Joe User generating more * than 64Hz of interrupts on a multi-user machine. */ if (!kernel && (arg > rtc_max_user_freq) && !capable(CAP_SYS_RESOURCE)) return -EACCES; while (arg > (1<<tmp)) tmp++; /* * Check that the input was really a power of 2. 
*/ if (arg != (1<<tmp)) return -EINVAL; rtc_freq = arg; spin_lock_irqsave(&rtc_lock, flags); if (hpet_set_periodic_freq(arg)) { spin_unlock_irqrestore(&rtc_lock, flags); return 0; } val = CMOS_READ(RTC_FREQ_SELECT) & 0xf0; val |= (16 - tmp); CMOS_WRITE(val, RTC_FREQ_SELECT); spin_unlock_irqrestore(&rtc_lock, flags); return 0; } #endif case RTC_EPOCH_READ: /* Read the epoch. */ { return put_user(epoch, (unsigned long __user *)arg); } case RTC_EPOCH_SET: /* Set the epoch. */ { /* * There were no RTC clocks before 1900. */ if (arg < 1900) return -EINVAL; if (!capable(CAP_SYS_TIME)) return -EACCES; epoch = arg; return 0; } default: return -ENOTTY; } return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0; } static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; ret = rtc_do_ioctl(cmd, arg, 0); return ret; } /* * We enforce only one user at a time here with the open/close. * Also clear the previous interrupt data on an open, and clean * up things on a close. */ static int rtc_open(struct inode *inode, struct file *file) { spin_lock_irq(&rtc_lock); if (rtc_status & RTC_IS_OPEN) goto out_busy; rtc_status |= RTC_IS_OPEN; rtc_irq_data = 0; spin_unlock_irq(&rtc_lock); return 0; out_busy: spin_unlock_irq(&rtc_lock); return -EBUSY; } static int rtc_fasync(int fd, struct file *filp, int on) { return fasync_helper(fd, filp, on, &rtc_async_queue); } static int rtc_release(struct inode *inode, struct file *file) { #ifdef RTC_IRQ unsigned char tmp; if (rtc_has_irq == 0) goto no_irq; /* * Turn off all interrupts once the device is no longer * in use, and clear the data. 
*/ spin_lock_irq(&rtc_lock); if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { tmp = CMOS_READ(RTC_CONTROL); tmp &= ~RTC_PIE; tmp &= ~RTC_AIE; tmp &= ~RTC_UIE; CMOS_WRITE(tmp, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); } if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } spin_unlock_irq(&rtc_lock); no_irq: #endif spin_lock_irq(&rtc_lock); rtc_irq_data = 0; rtc_status &= ~RTC_IS_OPEN; spin_unlock_irq(&rtc_lock); return 0; } #ifdef RTC_IRQ static unsigned int rtc_poll(struct file *file, poll_table *wait) { unsigned long l; if (rtc_has_irq == 0) return 0; poll_wait(file, &rtc_wait, wait); spin_lock_irq(&rtc_lock); l = rtc_irq_data; spin_unlock_irq(&rtc_lock); if (l != 0) return POLLIN | POLLRDNORM; return 0; } #endif int rtc_register(rtc_task_t *task) { #ifndef RTC_IRQ return -EIO; #else if (task == NULL || task->func == NULL) return -EINVAL; spin_lock_irq(&rtc_lock); if (rtc_status & RTC_IS_OPEN) { spin_unlock_irq(&rtc_lock); return -EBUSY; } spin_lock(&rtc_task_lock); if (rtc_callback) { spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return -EBUSY; } rtc_status |= RTC_IS_OPEN; rtc_callback = task; spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return 0; #endif } EXPORT_SYMBOL(rtc_register); int rtc_unregister(rtc_task_t *task) { #ifndef RTC_IRQ return -EIO; #else unsigned char tmp; spin_lock_irq(&rtc_lock); spin_lock(&rtc_task_lock); if (rtc_callback != task) { spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return -ENXIO; } rtc_callback = NULL; /* disable controls */ if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { tmp = CMOS_READ(RTC_CONTROL); tmp &= ~RTC_PIE; tmp &= ~RTC_AIE; tmp &= ~RTC_UIE; CMOS_WRITE(tmp, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); } if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } rtc_status &= ~RTC_IS_OPEN; spin_unlock(&rtc_task_lock); spin_unlock_irq(&rtc_lock); return 0; #endif } EXPORT_SYMBOL(rtc_unregister); int 
rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg) { #ifndef RTC_IRQ return -EIO; #else unsigned long flags; if (cmd != RTC_PIE_ON && cmd != RTC_PIE_OFF && cmd != RTC_IRQP_SET) return -EINVAL; spin_lock_irqsave(&rtc_task_lock, flags); if (rtc_callback != task) { spin_unlock_irqrestore(&rtc_task_lock, flags); return -ENXIO; } spin_unlock_irqrestore(&rtc_task_lock, flags); return rtc_do_ioctl(cmd, arg, 1); #endif } EXPORT_SYMBOL(rtc_control); /* * The various file operations we support. */ static const struct file_operations rtc_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = rtc_read, #ifdef RTC_IRQ .poll = rtc_poll, #endif .unlocked_ioctl = rtc_ioctl, .open = rtc_open, .release = rtc_release, .fasync = rtc_fasync, }; static struct miscdevice rtc_dev = { .minor = RTC_MINOR, .name = "rtc", .fops = &rtc_fops, }; #ifdef CONFIG_PROC_FS static const struct file_operations rtc_proc_fops = { .owner = THIS_MODULE, .open = rtc_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif static resource_size_t rtc_size; static struct resource * __init rtc_request_region(resource_size_t size) { struct resource *r; if (RTC_IOMAPPED) r = request_region(RTC_PORT(0), size, "rtc"); else r = request_mem_region(RTC_PORT(0), size, "rtc"); if (r) rtc_size = size; return r; } static void rtc_release_region(void) { if (RTC_IOMAPPED) release_region(RTC_PORT(0), rtc_size); else release_mem_region(RTC_PORT(0), rtc_size); } static int __init rtc_init(void) { #ifdef CONFIG_PROC_FS struct proc_dir_entry *ent; #endif #if defined(__alpha__) || defined(__mips__) unsigned int year, ctrl; char *guess = NULL; #endif #ifdef CONFIG_SPARC32 struct device_node *ebus_dp; struct platform_device *op; #else void *r; #ifdef RTC_IRQ irq_handler_t rtc_int_handler_ptr; #endif #endif #ifdef CONFIG_SPARC32 for_each_node_by_name(ebus_dp, "ebus") { struct device_node *dp; for (dp = ebus_dp; dp; dp = dp->sibling) { if (!strcmp(dp->name, "rtc")) { op = 
of_find_device_by_node(dp); if (op) { rtc_port = op->resource[0].start; rtc_irq = op->irqs[0]; goto found; } } } } rtc_has_irq = 0; printk(KERN_ERR "rtc_init: no PC rtc found\n"); return -EIO; found: if (!rtc_irq) { rtc_has_irq = 0; goto no_irq; } /* * XXX Interrupt pin #7 in Espresso is shared between RTC and * PCI Slot 2 INTA# (and some INTx# in Slot 1). */ if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) { rtc_has_irq = 0; printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq); return -EIO; } no_irq: #else r = rtc_request_region(RTC_IO_EXTENT); /* * If we've already requested a smaller range (for example, because * PNPBIOS or ACPI told us how the device is configured), the request * above might fail because it's too big. * * If so, request just the range we actually use. */ if (!r) r = rtc_request_region(RTC_IO_EXTENT_USED); if (!r) { #ifdef RTC_IRQ rtc_has_irq = 0; #endif printk(KERN_ERR "rtc: I/O resource %lx is not free.\n", (long)(RTC_PORT(0))); return -EIO; } #ifdef RTC_IRQ if (is_hpet_enabled()) { int err; rtc_int_handler_ptr = hpet_rtc_interrupt; err = hpet_register_irq_handler(rtc_interrupt); if (err != 0) { printk(KERN_WARNING "hpet_register_irq_handler failed " "in rtc_init()."); return err; } } else { rtc_int_handler_ptr = rtc_interrupt; } if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) { /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ rtc_has_irq = 0; printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); rtc_release_region(); return -EIO; } hpet_rtc_timer_init(); #endif #endif /* CONFIG_SPARC32 vs. 
others */ if (misc_register(&rtc_dev)) { #ifdef RTC_IRQ free_irq(RTC_IRQ, NULL); hpet_unregister_irq_handler(rtc_interrupt); rtc_has_irq = 0; #endif rtc_release_region(); return -ENODEV; } #ifdef CONFIG_PROC_FS ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops); if (!ent) printk(KERN_WARNING "rtc: Failed to register with procfs.\n"); #endif #if defined(__alpha__) || defined(__mips__) rtc_freq = HZ; /* Each operating system on an Alpha uses its own epoch. Let's try to guess which one we are using now. */ if (rtc_is_updating() != 0) msleep(20); spin_lock_irq(&rtc_lock); year = CMOS_READ(RTC_YEAR); ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irq(&rtc_lock); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) year = bcd2bin(year); /* This should never happen... */ if (year < 20) { epoch = 2000; guess = "SRM (post-2000)"; } else if (year >= 20 && year < 48) { epoch = 1980; guess = "ARC console"; } else if (year >= 48 && year < 72) { epoch = 1952; guess = "Digital UNIX"; #if defined(__mips__) } else if (year >= 72 && year < 74) { epoch = 2000; guess = "Digital DECstation"; #else } else if (year >= 70) { epoch = 1900; guess = "Standard PC (1900)"; #endif } if (guess) printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch); #endif #ifdef RTC_IRQ if (rtc_has_irq == 0) goto no_irq2; spin_lock_irq(&rtc_lock); rtc_freq = 1024; if (!hpet_set_periodic_freq(rtc_freq)) { /* * Initialize periodic frequency to CMOS reset default, * which is 1024Hz */ CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT); } spin_unlock_irq(&rtc_lock); no_irq2: #endif (void) init_sysctl(); printk(KERN_INFO "Real Time Clock Driver v" RTC_VERSION "\n"); return 0; } static void __exit rtc_exit(void) { cleanup_sysctl(); remove_proc_entry("driver/rtc", NULL); misc_deregister(&rtc_dev); #ifdef CONFIG_SPARC32 if (rtc_has_irq) free_irq(rtc_irq, &rtc_port); #else rtc_release_region(); #ifdef RTC_IRQ if (rtc_has_irq) { free_irq(RTC_IRQ, NULL); 
hpet_unregister_irq_handler(hpet_rtc_interrupt); } #endif #endif /* CONFIG_SPARC32 */ } module_init(rtc_init); module_exit(rtc_exit); #ifdef RTC_IRQ /* * At IRQ rates >= 4096Hz, an interrupt may get lost altogether. * (usually during an IDE disk interrupt, with IRQ unmasking off) * Since the interrupt handler doesn't get called, the IRQ status * byte doesn't get read, and the RTC stops generating interrupts. * A timer is set, and will call this function if/when that happens. * To get it out of this stalled state, we just read the status. * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost. * (You *really* shouldn't be trying to use a non-realtime system * for something that requires a steady > 1KHz signal anyways.) */ static void rtc_dropped_irq(unsigned long data) { unsigned long freq; spin_lock_irq(&rtc_lock); if (hpet_rtc_dropped_irq()) { spin_unlock_irq(&rtc_lock); return; } /* Just in case someone disabled the timer from behind our back... */ if (rtc_status & RTC_TIMER_ON) mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); rtc_irq_data += ((rtc_freq/HZ)<<8); rtc_irq_data &= ~0xff; rtc_irq_data |= (CMOS_READ(RTC_INTR_FLAGS) & 0xF0); /* restart */ freq = rtc_freq; spin_unlock_irq(&rtc_lock); printk_ratelimited(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq); /* Now we have new data */ wake_up_interruptible(&rtc_wait); kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); } #endif #ifdef CONFIG_PROC_FS /* * Info exported via "/proc/driver/rtc". */ static int rtc_proc_show(struct seq_file *seq, void *v) { #define YN(bit) ((ctrl & bit) ? "yes" : "no") #define NY(bit) ((ctrl & bit) ? "no" : "yes") struct rtc_time tm; unsigned char batt, ctrl; unsigned long freq; spin_lock_irq(&rtc_lock); batt = CMOS_READ(RTC_VALID) & RTC_VRT; ctrl = CMOS_READ(RTC_CONTROL); freq = rtc_freq; spin_unlock_irq(&rtc_lock); rtc_get_rtc_time(&tm); /* * There is no way to tell if the luser has the RTC set for local * time or for Universal Standard Time (GMT). 
Probably local though. */ seq_printf(seq, "rtc_time\t: %02d:%02d:%02d\n" "rtc_date\t: %04d-%02d-%02d\n" "rtc_epoch\t: %04lu\n", tm.tm_hour, tm.tm_min, tm.tm_sec, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch); get_rtc_alm_time(&tm); /* * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will * match any value for that particular field. Values that are * greater than a valid time, but less than 0xc0 shouldn't appear. */ seq_puts(seq, "alarm\t\t: "); if (tm.tm_hour <= 24) seq_printf(seq, "%02d:", tm.tm_hour); else seq_puts(seq, "**:"); if (tm.tm_min <= 59) seq_printf(seq, "%02d:", tm.tm_min); else seq_puts(seq, "**:"); if (tm.tm_sec <= 59) seq_printf(seq, "%02d\n", tm.tm_sec); else seq_puts(seq, "**\n"); seq_printf(seq, "DST_enable\t: %s\n" "BCD\t\t: %s\n" "24hr\t\t: %s\n" "square_wave\t: %s\n" "alarm_IRQ\t: %s\n" "update_IRQ\t: %s\n" "periodic_IRQ\t: %s\n" "periodic_freq\t: %ld\n" "batt_status\t: %s\n", YN(RTC_DST_EN), NY(RTC_DM_BINARY), YN(RTC_24H), YN(RTC_SQWE), YN(RTC_AIE), YN(RTC_UIE), YN(RTC_PIE), freq, batt ? "okay" : "dead"); return 0; #undef YN #undef NY } static int rtc_proc_open(struct inode *inode, struct file *file) { return single_open(file, rtc_proc_show, NULL); } #endif static void rtc_get_rtc_time(struct rtc_time *rtc_tm) { unsigned long uip_watchdog = jiffies, flags; unsigned char ctrl; #ifdef CONFIG_MACH_DECSTATION unsigned int real_year; #endif /* * read RTC once any update in progress is done. The update * can take just over 2ms. We wait 20ms. There is no need to * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. * If you need to know *exactly* when a second has started, enable * periodic update complete interrupts, (via ioctl) and then * immediately read /dev/rtc which will block until you get the IRQ. * Once the read clears, read the RTC time (again via ioctl). Easy. 
*/ while (rtc_is_updating() != 0 && time_before(jiffies, uip_watchdog + 2*HZ/100)) cpu_relax(); /* * Only the values that we read from the RTC are set. We leave * tm_wday, tm_yday and tm_isdst untouched. Note that while the * RTC has RTC_DAY_OF_WEEK, we should usually ignore it, as it is * only updated by the RTC when initially set to a non-zero value. */ spin_lock_irqsave(&rtc_lock, flags); rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS); rtc_tm->tm_min = CMOS_READ(RTC_MINUTES); rtc_tm->tm_hour = CMOS_READ(RTC_HOURS); rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH); rtc_tm->tm_mon = CMOS_READ(RTC_MONTH); rtc_tm->tm_year = CMOS_READ(RTC_YEAR); /* Only set from 2.6.16 onwards */ rtc_tm->tm_wday = CMOS_READ(RTC_DAY_OF_WEEK); #ifdef CONFIG_MACH_DECSTATION real_year = CMOS_READ(RTC_DEC_YEAR); #endif ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irqrestore(&rtc_lock, flags); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday); rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon); rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); rtc_tm->tm_wday = bcd2bin(rtc_tm->tm_wday); } #ifdef CONFIG_MACH_DECSTATION rtc_tm->tm_year += real_year - 72; #endif /* * Account for differences between how the RTC uses the values * and how they are defined in a struct rtc_time; */ rtc_tm->tm_year += epoch - 1900; if (rtc_tm->tm_year <= 69) rtc_tm->tm_year += 100; rtc_tm->tm_mon--; } static void get_rtc_alm_time(struct rtc_time *alm_tm) { unsigned char ctrl; /* * Only the values that we read from the RTC are set. That * means only tm_hour, tm_min, and tm_sec. 
*/ spin_lock_irq(&rtc_lock); alm_tm->tm_sec = CMOS_READ(RTC_SECONDS_ALARM); alm_tm->tm_min = CMOS_READ(RTC_MINUTES_ALARM); alm_tm->tm_hour = CMOS_READ(RTC_HOURS_ALARM); ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irq(&rtc_lock); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec); alm_tm->tm_min = bcd2bin(alm_tm->tm_min); alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour); } } #ifdef RTC_IRQ /* * Used to disable/enable interrupts for any one of UIE, AIE, PIE. * Rumour has it that if you frob the interrupt enable/disable * bits in RTC_CONTROL, you should read RTC_INTR_FLAGS, to * ensure you actually start getting interrupts. Probably for * compatibility with older/broken chipset RTC implementations. * We also clear out any old irq data after an ioctl() that * meddles with the interrupt enable/disable bits. */ static void mask_rtc_irq_bit_locked(unsigned char bit) { unsigned char val; if (hpet_mask_rtc_irq_bit(bit)) return; val = CMOS_READ(RTC_CONTROL); val &= ~bit; CMOS_WRITE(val, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); rtc_irq_data = 0; } static void set_rtc_irq_bit_locked(unsigned char bit) { unsigned char val; if (hpet_set_rtc_irq_bit(bit)) return; val = CMOS_READ(RTC_CONTROL); val |= bit; CMOS_WRITE(val, RTC_CONTROL); CMOS_READ(RTC_INTR_FLAGS); rtc_irq_data = 0; } #endif MODULE_AUTHOR("Paul Gortmaker"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(RTC_MINOR);
gpl-2.0
henrix/rpi-linux
drivers/block/aoe/aoechr.c
4870
6392
/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ /* * aoechr.c * AoE character device driver */ #include <linux/hdreg.h> #include <linux/blkdev.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/skbuff.h> #include <linux/export.h> #include "aoe.h" enum { //MINOR_STAT = 1, (moved to sysfs) MINOR_ERR = 2, MINOR_DISCOVER, MINOR_INTERFACES, MINOR_REVALIDATE, MINOR_FLUSH, MSGSZ = 2048, NMSG = 100, /* message backlog to retain */ }; struct aoe_chardev { ulong minor; char name[32]; }; enum { EMFL_VALID = 1 }; struct ErrMsg { short flags; short len; char *msg; }; static DEFINE_MUTEX(aoechr_mutex); /* A ring buffer of error messages, to be read through * "/dev/etherd/err". When no messages are present, * readers will block waiting for messages to appear. */ static struct ErrMsg emsgs[NMSG]; static int emsgs_head_idx, emsgs_tail_idx; static struct completion emsgs_comp; static spinlock_t emsgs_lock; static int nblocked_emsgs_readers; static struct class *aoe_class; static struct aoe_chardev chardevs[] = { { MINOR_ERR, "err" }, { MINOR_DISCOVER, "discover" }, { MINOR_INTERFACES, "interfaces" }, { MINOR_REVALIDATE, "revalidate" }, { MINOR_FLUSH, "flush" }, }; static int discover(void) { aoecmd_cfg(0xffff, 0xff); return 0; } static int interfaces(const char __user *str, size_t size) { if (set_aoe_iflist(str, size)) { printk(KERN_ERR "aoe: could not set interface list: too many interfaces\n"); return -EINVAL; } return 0; } static int revalidate(const char __user *str, size_t size) { int major, minor, n; ulong flags; struct aoedev *d; struct sk_buff *skb; char buf[16]; if (size >= sizeof buf) return -EINVAL; buf[sizeof buf - 1] = '\0'; if (copy_from_user(buf, str, size)) return -EFAULT; n = sscanf(buf, "e%d.%d", &major, &minor); if (n != 2) { pr_err("aoe: invalid device specification %s\n", buf); return -EINVAL; } d = aoedev_by_aoeaddr(major, minor, 0); if (!d) return -EINVAL; 
spin_lock_irqsave(&d->lock, flags); aoecmd_cleanslate(d); aoecmd_cfg(major, minor); loop: skb = aoecmd_ata_id(d); spin_unlock_irqrestore(&d->lock, flags); /* try again if we are able to sleep a bit, * otherwise give up this revalidation */ if (!skb && !msleep_interruptible(250)) { spin_lock_irqsave(&d->lock, flags); goto loop; } aoedev_put(d); if (skb) { struct sk_buff_head queue; __skb_queue_head_init(&queue); __skb_queue_tail(&queue, skb); aoenet_xmit(&queue); } return 0; } void aoechr_error(char *msg) { struct ErrMsg *em; char *mp; ulong flags, n; n = strlen(msg); spin_lock_irqsave(&emsgs_lock, flags); em = emsgs + emsgs_tail_idx; if ((em->flags & EMFL_VALID)) { bail: spin_unlock_irqrestore(&emsgs_lock, flags); return; } mp = kmemdup(msg, n, GFP_ATOMIC); if (mp == NULL) { printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n); goto bail; } em->msg = mp; em->flags |= EMFL_VALID; em->len = n; emsgs_tail_idx++; emsgs_tail_idx %= ARRAY_SIZE(emsgs); spin_unlock_irqrestore(&emsgs_lock, flags); if (nblocked_emsgs_readers) complete(&emsgs_comp); } static ssize_t aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp) { int ret = -EINVAL; switch ((unsigned long) filp->private_data) { default: printk(KERN_INFO "aoe: can't write to that file.\n"); break; case MINOR_DISCOVER: ret = discover(); break; case MINOR_INTERFACES: ret = interfaces(buf, cnt); break; case MINOR_REVALIDATE: ret = revalidate(buf, cnt); break; case MINOR_FLUSH: ret = aoedev_flush(buf, cnt); break; } if (ret == 0) ret = cnt; return ret; } static int aoechr_open(struct inode *inode, struct file *filp) { int n, i; mutex_lock(&aoechr_mutex); n = iminor(inode); filp->private_data = (void *) (unsigned long) n; for (i = 0; i < ARRAY_SIZE(chardevs); ++i) if (chardevs[i].minor == n) { mutex_unlock(&aoechr_mutex); return 0; } mutex_unlock(&aoechr_mutex); return -EINVAL; } static int aoechr_rel(struct inode *inode, struct file *filp) { return 0; } static ssize_t aoechr_read(struct file 
*filp, char __user *buf, size_t cnt, loff_t *off) { unsigned long n; char *mp; struct ErrMsg *em; ssize_t len; ulong flags; n = (unsigned long) filp->private_data; if (n != MINOR_ERR) return -EFAULT; spin_lock_irqsave(&emsgs_lock, flags); for (;;) { em = emsgs + emsgs_head_idx; if ((em->flags & EMFL_VALID) != 0) break; if (filp->f_flags & O_NDELAY) { spin_unlock_irqrestore(&emsgs_lock, flags); return -EAGAIN; } nblocked_emsgs_readers++; spin_unlock_irqrestore(&emsgs_lock, flags); n = wait_for_completion_interruptible(&emsgs_comp); spin_lock_irqsave(&emsgs_lock, flags); nblocked_emsgs_readers--; if (n) { spin_unlock_irqrestore(&emsgs_lock, flags); return -ERESTARTSYS; } } if (em->len > cnt) { spin_unlock_irqrestore(&emsgs_lock, flags); return -EAGAIN; } mp = em->msg; len = em->len; em->msg = NULL; em->flags &= ~EMFL_VALID; emsgs_head_idx++; emsgs_head_idx %= ARRAY_SIZE(emsgs); spin_unlock_irqrestore(&emsgs_lock, flags); n = copy_to_user(buf, mp, len); kfree(mp); return n == 0 ? len : -EFAULT; } static const struct file_operations aoe_fops = { .write = aoechr_write, .read = aoechr_read, .open = aoechr_open, .release = aoechr_rel, .owner = THIS_MODULE, .llseek = noop_llseek, }; static char *aoe_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev)); } int __init aoechr_init(void) { int n, i; n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops); if (n < 0) { printk(KERN_ERR "aoe: can't register char device\n"); return n; } init_completion(&emsgs_comp); spin_lock_init(&emsgs_lock); aoe_class = class_create(THIS_MODULE, "aoe"); if (IS_ERR(aoe_class)) { unregister_chrdev(AOE_MAJOR, "aoechr"); return PTR_ERR(aoe_class); } aoe_class->devnode = aoe_devnode; for (i = 0; i < ARRAY_SIZE(chardevs); ++i) device_create(aoe_class, NULL, MKDEV(AOE_MAJOR, chardevs[i].minor), NULL, chardevs[i].name); return 0; } void aoechr_exit(void) { int i; for (i = 0; i < ARRAY_SIZE(chardevs); ++i) device_destroy(aoe_class, MKDEV(AOE_MAJOR, 
chardevs[i].minor)); class_destroy(aoe_class); unregister_chrdev(AOE_MAJOR, "aoechr"); }
gpl-2.0
tailormoon/android_kernel_lge_msm8226_g2mds
arch/arm/kernel/patch.c
5126
1821
#include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/stop_machine.h> #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include <asm/opcodes.h> #include <asm/mmu_writeable.h> #include "patch.h" struct patch { void *addr; unsigned int insn; }; void __kprobes __patch_text(void *addr, unsigned int insn) { bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); int size; unsigned long flags; mem_text_writeable_spinlock(&flags); mem_text_address_writeable((unsigned long)addr); if (thumb2 && __opcode_is_thumb16(insn)) { *(u16 *)addr = __opcode_to_mem_thumb16(insn); size = sizeof(u16); } else if (thumb2 && ((uintptr_t)addr & 2)) { u16 first = __opcode_thumb32_first(insn); u16 second = __opcode_thumb32_second(insn); u16 *addrh = addr; addrh[0] = __opcode_to_mem_thumb16(first); addrh[1] = __opcode_to_mem_thumb16(second); size = sizeof(u32); } else { if (thumb2) insn = __opcode_to_mem_thumb32(insn); else insn = __opcode_to_mem_arm(insn); *(u32 *)addr = insn; size = sizeof(u32); } flush_icache_range((uintptr_t)(addr), (uintptr_t)(addr) + size); mem_text_address_restore(); mem_text_writeable_spinunlock(&flags); } static int __kprobes patch_text_stop_machine(void *data) { struct patch *patch = data; __patch_text(patch->addr, patch->insn); return 0; } void __kprobes patch_text(void *addr, unsigned int insn) { struct patch patch = { .addr = addr, .insn = insn, }; if (cache_ops_need_broadcast()) { stop_machine(patch_text_stop_machine, &patch, cpu_online_mask); } else { bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL) && __opcode_is_thumb32(insn) && ((uintptr_t)addr & 2); if (straddles_word) stop_machine(patch_text_stop_machine, &patch, NULL); else __patch_text(addr, insn); } }
gpl-2.0
AOKP/kernel_sony_apq8064
drivers/usb/gadget/acm_ms.c
5126
6550
/* * acm_ms.c -- Composite driver, with ACM and mass storage support * * Copyright (C) 2008 David Brownell * Copyright (C) 2008 Nokia Corporation * Author: David Brownell * Modified: Klaus Schwarzkopf <schwarzkopf@sensortherm.de> * * Heavily based on multi.c and cdc2.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/utsname.h> #include "u_serial.h" #define DRIVER_DESC "Composite Gadget (ACM + MS)" #define DRIVER_VERSION "2011/10/10" /*-------------------------------------------------------------------------*/ /* * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ #define ACM_MS_VENDOR_NUM 0x1d6b /* Linux Foundation */ #define ACM_MS_PRODUCT_NUM 0x0106 /* Composite Gadget: ACM + MS*/ /*-------------------------------------------------------------------------*/ /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
*/ #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "u_serial.c" #include "f_acm.c" #include "f_mass_storage.c" /*-------------------------------------------------------------------------*/ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_MISC /* 0xEF */, .bDeviceSubClass = 2, .bDeviceProtocol = 1, /* .bMaxPacketSize0 = f(hardware) */ /* Vendor and product id can be overridden by module parameters. */ .idVendor = cpu_to_le16(ACM_MS_VENDOR_NUM), .idProduct = cpu_to_le16(ACM_MS_PRODUCT_NUM), /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ /* NO SERIAL NUMBER */ /*.bNumConfigurations = DYNAMIC*/ }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* * REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &otg_descriptor, NULL, }; /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 static char manufacturer[50]; static struct usb_string strings_dev[] = { [STRING_MANUFACTURER_IDX].s = manufacturer, [STRING_PRODUCT_IDX].s = DRIVER_DESC, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; /****************************** Configurations ******************************/ static struct fsg_module_parameters fsg_mod_data = { .stall = 1 }; FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data); static struct fsg_common fsg_common; /*-------------------------------------------------------------------------*/ /* * We _always_ have both ACM and mass storage functions. 
*/ static int __init acm_ms_do_config(struct usb_configuration *c) { int status; if (gadget_is_otg(c->cdev->gadget)) { c->descriptors = otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } status = acm_bind_config(c, 0); if (status < 0) return status; status = fsg_bind_config(c->cdev, c, &fsg_common); if (status < 0) return status; return 0; } static struct usb_configuration acm_ms_config_driver = { .label = DRIVER_DESC, .bConfigurationValue = 1, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; /*-------------------------------------------------------------------------*/ static int __init acm_ms_bind(struct usb_composite_dev *cdev) { int gcnum; struct usb_gadget *gadget = cdev->gadget; int status; void *retp; /* set up serial link layer */ status = gserial_setup(cdev->gadget, 1); if (status < 0) return status; /* set up mass storage function */ retp = fsg_common_from_params(&fsg_common, cdev, &fsg_mod_data); if (IS_ERR(retp)) { status = PTR_ERR(retp); goto fail0; } /* set bcdDevice */ gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) { device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum); } else { WARNING(cdev, "controller '%s' not recognized; trying %s\n", gadget->name, acm_ms_config_driver.label); device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099); } /* * Allocate string descriptor numbers ... note that string * contents can be overridden by the composite_dev glue. 
*/ /* device descriptor strings: manufacturer, product */ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); status = usb_string_id(cdev); if (status < 0) goto fail1; strings_dev[STRING_MANUFACTURER_IDX].id = status; device_desc.iManufacturer = status; status = usb_string_id(cdev); if (status < 0) goto fail1; strings_dev[STRING_PRODUCT_IDX].id = status; device_desc.iProduct = status; /* register our configuration */ status = usb_add_config(cdev, &acm_ms_config_driver, acm_ms_do_config); if (status < 0) goto fail1; dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n", DRIVER_DESC); fsg_common_put(&fsg_common); return 0; /* error recovery */ fail1: fsg_common_put(&fsg_common); fail0: gserial_cleanup(); return status; } static int __exit acm_ms_unbind(struct usb_composite_dev *cdev) { gserial_cleanup(); return 0; } static struct usb_composite_driver acm_ms_driver = { .name = "g_acm_ms", .dev = &device_desc, .strings = dev_strings, .unbind = __exit_p(acm_ms_unbind), }; MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Klaus Schwarzkopf <schwarzkopf@sensortherm.de>"); MODULE_LICENSE("GPL v2"); static int __init init(void) { return usb_composite_probe(&acm_ms_driver, acm_ms_bind); } module_init(init); static void __exit cleanup(void) { usb_composite_unregister(&acm_ms_driver); } module_exit(cleanup);
gpl-2.0
yajnab/android_kernel_htc_golfu
arch/arm/mach-w90x900/gpio.c
13830
3860
/*
 * linux/arch/arm/mach-w90x900/gpio.c
 *
 * Generic nuc900 GPIO handling
 *
 *  Wan ZongShun <mcuos.com@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/gpio.h>

#include <mach/hardware.h>

/* Register layout of one GPIO group, relative to its base. */
#define GPIO_BASE		(W90X900_VA_GPIO)
#define GPIO_DIR		(0x04)	/* direction: 1 = output */
#define GPIO_OUT		(0x08)	/* output latch */
#define GPIO_IN			(0x0C)	/* input sample */
#define GROUPINERV		(0x10)	/* byte stride between groups */
#define GPIO_GPIO(Nb)		(0x00000001 << (Nb))
#define to_nuc900_gpio_chip(c) container_of(c, struct nuc900_gpio_chip, chip)

/* Build one gpio_chip entry wired up to the common accessors below. */
#define NUC900_GPIO_CHIP(name, base_gpio, nr_gpio)	\
	{						\
		.chip = {				\
			.label		  = name,	\
			.direction_input  = nuc900_dir_input,	\
			.direction_output = nuc900_dir_output,	\
			.get		  = nuc900_gpio_get,	\
			.set		  = nuc900_gpio_set,	\
			.base		  = base_gpio,	\
			.ngpio		  = nr_gpio,	\
		}					\
	}

struct nuc900_gpio_chip {
	struct gpio_chip	chip;
	void __iomem		*regbase;	/* Base of group register*/
	spinlock_t		gpio_lock;	/* protects RMW on DIR/OUT */
};

/* Read the current input level of one pin; returns 0 or 1. */
static int nuc900_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct nuc900_gpio_chip *nuc900_gpio = to_nuc900_gpio_chip(chip);
	void __iomem *pio = nuc900_gpio->regbase + GPIO_IN;
	unsigned int regval;

	regval = __raw_readl(pio);
	regval &= GPIO_GPIO(offset);

	return (regval != 0);
}

/* Drive one pin high or low via a locked read-modify-write of OUT. */
static void nuc900_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
	struct nuc900_gpio_chip *nuc900_gpio = to_nuc900_gpio_chip(chip);
	void __iomem *pio = nuc900_gpio->regbase + GPIO_OUT;
	unsigned int regval;
	unsigned long flags;

	spin_lock_irqsave(&nuc900_gpio->gpio_lock, flags);

	regval = __raw_readl(pio);

	if (val)
		regval |= GPIO_GPIO(offset);
	else
		regval &= ~GPIO_GPIO(offset);

	__raw_writel(regval, pio);

	spin_unlock_irqrestore(&nuc900_gpio->gpio_lock, flags);
}

/* Switch one pin to input by clearing its DIR bit. */
static int nuc900_dir_input(struct gpio_chip *chip, unsigned offset)
{
	struct nuc900_gpio_chip *nuc900_gpio = to_nuc900_gpio_chip(chip);
	void __iomem *pio = nuc900_gpio->regbase + GPIO_DIR;
	unsigned int regval;
	unsigned long flags;

	spin_lock_irqsave(&nuc900_gpio->gpio_lock, flags);

	regval = __raw_readl(pio);
	regval &= ~GPIO_GPIO(offset);
	__raw_writel(regval, pio);

	spin_unlock_irqrestore(&nuc900_gpio->gpio_lock, flags);

	return 0;
}

/* Switch one pin to output (DIR bit set) and latch its initial level.
 * Note the direction is set before the level is written, so the pin
 * may briefly drive the stale OUT value. */
static int nuc900_dir_output(struct gpio_chip *chip, unsigned offset, int val)
{
	struct nuc900_gpio_chip *nuc900_gpio = to_nuc900_gpio_chip(chip);
	void __iomem *outreg = nuc900_gpio->regbase + GPIO_OUT;
	void __iomem *pio = nuc900_gpio->regbase + GPIO_DIR;
	unsigned int regval;
	unsigned long flags;

	spin_lock_irqsave(&nuc900_gpio->gpio_lock, flags);

	regval = __raw_readl(pio);
	regval |= GPIO_GPIO(offset);
	__raw_writel(regval, pio);

	regval = __raw_readl(outreg);

	if (val)
		regval |= GPIO_GPIO(offset);
	else
		regval &= ~GPIO_GPIO(offset);

	__raw_writel(regval, outreg);

	spin_unlock_irqrestore(&nuc900_gpio->gpio_lock, flags);

	return 0;
}

/* Static table of the chip's GPIO groups; base/ngpio pairs tile the
 * global GPIO number space (C..I). */
static struct nuc900_gpio_chip nuc900_gpio[] = {
	NUC900_GPIO_CHIP("GROUPC", 0, 16),
	NUC900_GPIO_CHIP("GROUPD", 16, 10),
	NUC900_GPIO_CHIP("GROUPE", 26, 14),
	NUC900_GPIO_CHIP("GROUPF", 40, 10),
	NUC900_GPIO_CHIP("GROUPG", 50, 17),
	NUC900_GPIO_CHIP("GROUPH", 67, 8),
	NUC900_GPIO_CHIP("GROUPI", 75, 17),
};

/* Register the first nr_group GPIO groups with gpiolib.  The caller is
 * expected to pass at most ARRAY_SIZE(nuc900_gpio) groups; the value is
 * not range-checked here. */
void __init nuc900_init_gpio(int nr_group)
{
	unsigned	i;
	struct nuc900_gpio_chip *gpio_chip;

	for (i = 0; i < nr_group; i++) {
		gpio_chip = &nuc900_gpio[i];
		spin_lock_init(&gpio_chip->gpio_lock);
		gpio_chip->regbase = GPIO_BASE + i * GROUPINERV;
		gpiochip_add(&gpio_chip->chip);
	}
}
gpl-2.0
openSUSE/systemd
src/shutdown/test-umount.c
7
2518
/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include "alloc-util.h"
#include "errno-util.h"
#include "log.h"
#include "path-util.h"
#include "string-util.h"
#include "tests.h"
#include "umount.h"
#include "util.h"

/* Parse one mountinfo source -- a test fixture when fname is given,
 * the live /proc/self/mountinfo otherwise -- and log every mount point
 * found.  Only checks that parsing succeeds; no content assertions. */
static void test_mount_points_list_one(const char *fname) {
        _cleanup_(mount_points_list_free) LIST_HEAD(MountPoint, mp_list_head);
        _cleanup_free_ char *testdata_fname = NULL;

        log_info("/* %s(\"%s\") */", __func__, fname ?: "/proc/self/mountinfo");

        if (fname) {
                assert_se(get_testdata_dir(fname, &testdata_fname) >= 0);
                fname = testdata_fname;
        }

        LIST_HEAD_INIT(mp_list_head);
        assert_se(mount_points_list_get(fname, &mp_list_head) >= 0);

        LIST_FOREACH(mount_point, m, mp_list_head)
                log_debug("path=%s o=%s f=0x%lx try-ro=%s dev=%u:%u",
                          m->path, strempty(m->remount_options),
                          m->remount_flags, yes_no(m->try_remount_ro),
                          major(m->devnum), minor(m->devnum));
}

/* Exercise the parser against the live system plus fixtures covering
 * an empty file, garbled input and a known regression (RH bug 1554943). */
TEST(mount_points_list) {
        test_mount_points_list_one(NULL);
        test_mount_points_list_one("/test-umount/empty.mountinfo");
        test_mount_points_list_one("/test-umount/garbled.mountinfo");
        test_mount_points_list_one("/test-umount/rhbug-1554943.mountinfo");
}

/* Same pattern for swap entries (/proc/swaps or a fixture).  Reading
 * the live file can fail without privileges, which is tolerated. */
static void test_swap_list_one(const char *fname) {
        _cleanup_(mount_points_list_free) LIST_HEAD(MountPoint, mp_list_head);
        _cleanup_free_ char *testdata_fname = NULL;
        int r;

        log_info("/* %s(\"%s\") */", __func__, fname ?: "/proc/swaps");

        if (fname) {
                assert_se(get_testdata_dir(fname, &testdata_fname) >= 0);
                fname = testdata_fname;
        }

        LIST_HEAD_INIT(mp_list_head);
        r = swap_list_get(fname, &mp_list_head);
        if (ERRNO_IS_PRIVILEGE(r))
                return;
        assert_se(r >= 0);

        LIST_FOREACH(mount_point, m, mp_list_head)
                log_debug("path=%s o=%s f=0x%lx try-ro=%s dev=%u:%u",
                          m->path, strempty(m->remount_options),
                          m->remount_flags, yes_no(m->try_remount_ro),
                          major(m->devnum), minor(m->devnum));
}

TEST(swap_list) {
        test_swap_list_one(NULL);
        test_swap_list_one("/test-umount/example.swaps");
}

DEFINE_TEST_MAIN(LOG_DEBUG);
gpl-2.0
TheTypoMaster/calligra
libs/widgets/KoLineStyleModel.cpp
7
3073
/* This file is part of the KDE project
 * Copyright (C) 2007 Jan Hambrecht <jaham@gmx.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB.  If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include "KoLineStyleModel_p.h"

#include <QPen>

// List model for a line-style chooser: rows 0..CustomDashLine-1 are the
// standard Qt pen styles, later rows are custom dash patterns, with an
// optional extra "temporary" pattern shown while editing.
KoLineStyleModel::KoLineStyleModel(QObject *parent)
    : QAbstractListModel(parent), m_hasTempStyle(false)
{
    // add standard dash patterns
    for (int i = Qt::NoPen; i < Qt::CustomDashLine; i++) {
        QPen pen(static_cast<Qt::PenStyle>(i));
        m_styles << pen.dashPattern();
    }
}

// All stored styles plus one virtual row for the temporary style.
int KoLineStyleModel::rowCount(const QModelIndex &/*parent*/) const
{
    return m_styles.count() + (m_hasTempStyle ? 1 : 0);
}

// DecorationRole returns a QPen configured for the row so the delegate
// can draw a style preview; SizeHintRole fixes the preview size.
QVariant KoLineStyleModel::data(const QModelIndex &index, int role) const
{
    if (!index.isValid())
        return QVariant();

    switch(role) {
    case Qt::DecorationRole: {
        QPen pen(Qt::black);
        pen.setWidth(2);
        // standard styles are addressed by enum value, custom ones by
        // their stored dash pattern; the trailing virtual row maps to
        // the temporary pattern
        if (index.row() < Qt::CustomDashLine)
            pen.setStyle(static_cast<Qt::PenStyle>(index.row()));
        else if (index.row() < m_styles.count())
            pen.setDashPattern(m_styles[index.row()]);
        else if (m_hasTempStyle)
            pen.setDashPattern(m_tempStyle);
        else
            pen.setStyle(Qt::NoPen);
        return QVariant(pen);
    }
    case Qt::SizeHintRole:
        return QSize(100, 15);
    default:
        return QVariant();
    }
}

// Permanently add a custom dash pattern; returns false if it is
// already known.  Note: the model is not reset here -- the caller is
// expected to trigger an update (e.g. via setLineStyle).
bool KoLineStyleModel::addCustomStyle(const QVector<qreal> &style)
{
    if (m_styles.contains(style))
        return false;

    m_styles.append(style);
    return true;
}

// Select a style and return its row index, installing the dashes as a
// temporary style if they are not yet stored.  Returns -1 for styles
// beyond Qt::CustomDashLine.
int KoLineStyleModel::setLineStyle(Qt::PenStyle style, const QVector<qreal> &dashes)
{
    // check if we select a standard or custom style
    if (style < Qt::CustomDashLine) {
        // a standard style
        m_hasTempStyle = false;
        reset();
        return style;
    } else if (style == Qt::CustomDashLine) {
        // a custom style -> check if already added
        int index = m_styles.indexOf(dashes, Qt::CustomDashLine);
        if (index < 0) {
            // not already added -> add temporarly
            m_tempStyle = dashes;
            m_hasTempStyle = true;
            reset();
            return m_styles.count();
        } else {
            // already added -> return index
            m_hasTempStyle = false;
            reset();
            return index;
        }
    }
    return -1;
}
gpl-2.0
meatchicken/suricata
src/source-erf-file.c
7
8600
/* Copyright (C) 2010-2014 Open Information Security Foundation
 *
 * You can copy, redistribute or modify this Program under the terms of
 * the GNU General Public License version 2 as published by the Free
 * Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/**
 * \file
 *
 * \author Endace Technology Limited.
 *
 * Support for reading ERF files.
 *
 * Only ethernet supported at this time.
 */

#include "suricata-common.h"
#include "suricata.h"
#include "tm-threads.h"

#define DAG_TYPE_ETH 2

/* Per-record flags byte of the DAG/ERF header. */
typedef struct DagFlags_ {
    uint8_t iface:2;
    uint8_t vlen:1;
    uint8_t trunc:1;
    uint8_t rxerror:1;
    uint8_t dserror:1;
    uint8_t reserved:1;
    uint8_t direction:1;
} DagFlags;

/* On-disk ERF record header.  rlen (total record length) and wlen
 * (wire length) are stored big-endian. */
typedef struct DagRecord_ {
    uint64_t ts;
    uint8_t type;
    DagFlags flags;
    uint16_t rlen;
    uint16_t lctr;
    uint16_t wlen;
    uint16_t pad;
} __attribute__((packed)) DagRecord;

/* Per-thread state for the ERF file reader. */
typedef struct ErfFileThreadVars_ {
    ThreadVars *tv;
    TmSlot *slot;

    FILE *erf;          /* open ERF input file */

    uint32_t pkts;      /* packets read so far */
    uint64_t bytes;     /* wire bytes read so far */
} ErfFileThreadVars;

static inline TmEcode ReadErfRecord(ThreadVars *, Packet *, void *);
TmEcode ReceiveErfFileLoop(ThreadVars *, void *, void *);
TmEcode ReceiveErfFileThreadInit(ThreadVars *, void *, void **);
void ReceiveErfFileThreadExitStats(ThreadVars *, void *);
TmEcode ReceiveErfFileThreadDeinit(ThreadVars *, void *);
TmEcode DecodeErfFileThreadInit(ThreadVars *, void *, void **);
TmEcode DecodeErfFileThreadDeinit(ThreadVars *tv, void *data);
TmEcode DecodeErfFile(ThreadVars *, Packet *, void *, PacketQueue *, PacketQueue *);

/**
 * \brief Register the ERF file receiver (reader) module.
 */
void TmModuleReceiveErfFileRegister(void)
{
    tmm_modules[TMM_RECEIVEERFFILE].name = "ReceiveErfFile";
    tmm_modules[TMM_RECEIVEERFFILE].ThreadInit = ReceiveErfFileThreadInit;
    tmm_modules[TMM_RECEIVEERFFILE].Func = NULL;
    tmm_modules[TMM_RECEIVEERFFILE].PktAcqLoop = ReceiveErfFileLoop;
    tmm_modules[TMM_RECEIVEERFFILE].ThreadExitPrintStats =
        ReceiveErfFileThreadExitStats;
    tmm_modules[TMM_RECEIVEERFFILE].ThreadDeinit = NULL;
    tmm_modules[TMM_RECEIVEERFFILE].RegisterTests = NULL;
    tmm_modules[TMM_RECEIVEERFFILE].cap_flags = 0;
    tmm_modules[TMM_RECEIVEERFFILE].flags = TM_FLAG_RECEIVE_TM;
}

/**
 * \brief Register the ERF file decoder module.
 */
void TmModuleDecodeErfFileRegister(void)
{
    tmm_modules[TMM_DECODEERFFILE].name = "DecodeErfFile";
    tmm_modules[TMM_DECODEERFFILE].ThreadInit = DecodeErfFileThreadInit;
    tmm_modules[TMM_DECODEERFFILE].Func = DecodeErfFile;
    tmm_modules[TMM_DECODEERFFILE].ThreadExitPrintStats = NULL;
    tmm_modules[TMM_DECODEERFFILE].ThreadDeinit = DecodeErfFileThreadDeinit;
    tmm_modules[TMM_DECODEERFFILE].RegisterTests = NULL;
    tmm_modules[TMM_DECODEERFFILE].cap_flags = 0;
    tmm_modules[TMM_DECODEERFFILE].flags = TM_FLAG_DECODE_TM;
}

/**
 * \brief ERF file reading loop.
 *
 * Pulls packets from the pool, fills them from the ERF file and hands
 * them down the pipeline until EOF, a read error or engine shutdown.
 */
TmEcode ReceiveErfFileLoop(ThreadVars *tv, void *data, void *slot)
{
    Packet *p = NULL;
    ErfFileThreadVars *etv = (ErfFileThreadVars *)data;

    etv->slot = ((TmSlot *)slot)->slot_next;

    while (1) {
        if (suricata_ctl_flags & (SURICATA_STOP | SURICATA_KILL)) {
            SCReturnInt(TM_ECODE_OK);
        }

        /* Make sure we have at least one packet in the packet pool,
         * to prevent us from alloc'ing packets at line rate. */
        PacketPoolWait();

        p = PacketGetFromQueueOrAlloc();
        if (unlikely(p == NULL)) {
            SCLogError(SC_ERR_MEM_ALLOC, "Failed to allocate a packet.");
            EngineStop();
            SCReturnInt(TM_ECODE_FAILED);
        }
        PKT_SET_SRC(p, PKT_SRC_WIRE);

        if (ReadErfRecord(tv, p, data) != TM_ECODE_OK) {
            TmqhOutputPacketpool(etv->tv, p);
            EngineStop();
            SCReturnInt(TM_ECODE_FAILED);
        }

        if (TmThreadsSlotProcessPkt(etv->tv, etv->slot, p) != TM_ECODE_OK) {
            EngineStop();
            SCReturnInt(TM_ECODE_FAILED);
        }
    }
    SCReturnInt(TM_ECODE_FAILED);
}

/**
 * \brief Read one ERF record from the file into a packet.
 *
 * \retval TM_ECODE_OK on success, TM_ECODE_FAILED on EOF, read error,
 *         malformed record or unsupported record type.
 */
static inline TmEcode ReadErfRecord(ThreadVars *tv, Packet *p, void *data)
{
    SCEnter();
    ErfFileThreadVars *etv = (ErfFileThreadVars *)data;
    DagRecord dr;

    int r = fread(&dr, sizeof(DagRecord), 1, etv->erf);
    if (r < 1) {
        if (feof(etv->erf)) {
            SCLogInfo("End of ERF file reached");
        } else {
            SCLogInfo("Error reading ERF record");
        }
        SCReturnInt(TM_ECODE_FAILED);
    }
    int rlen = ntohs(dr.rlen);
    int wlen = ntohs(dr.wlen);

    /* BUG FIX: rlen comes straight from the file and was previously
     * used unchecked.  If rlen < sizeof(DagRecord), the expression
     * rlen - sizeof(DagRecord) underflows and, converted to size_t for
     * fread(), requests a near-SIZE_MAX read into the packet buffer.
     * Reject such malformed records up front. */
    if (rlen < (int)sizeof(DagRecord)) {
        SCLogInfo("Bad ERF record: rlen %d is smaller than the record header",
            rlen);
        SCReturnInt(TM_ECODE_FAILED);
    }

    /* NOTE(review): the payload length is still not validated against
     * the capacity of the packet's data buffer -- confirm whether this
     * Suricata version provides a max-packet-size accessor to check
     * rlen against before this read. */
    r = fread(GET_PKT_DATA(p), rlen - sizeof(DagRecord), 1, etv->erf);
    if (r < 1) {
        if (feof(etv->erf)) {
            SCLogInfo("End of ERF file reached");
        } else {
            SCLogInfo("Error reading ERF record");
        }
        SCReturnInt(TM_ECODE_FAILED);
    }

    /* Only support ethernet at this time. */
    if (dr.type != DAG_TYPE_ETH) {
        SCLogError(SC_ERR_UNIMPLEMENTED,
            "DAG record type %d not implemented.", dr.type);
        SCReturnInt(TM_ECODE_FAILED);
    }
    GET_PKT_LEN(p) = wlen;
    p->datalink = LINKTYPE_ETHERNET;

    /* Convert ERF time to timeval - from libpcap. */
    uint64_t ts = dr.ts;
    p->ts.tv_sec = ts >> 32;
    ts = (ts & 0xffffffffULL) * 1000000;
    ts += 0x80000000; /* rounding */
    p->ts.tv_usec = ts >> 32;
    if (p->ts.tv_usec >= 1000000) {
        p->ts.tv_usec -= 1000000;
        p->ts.tv_sec++;
    }

    etv->pkts++;
    etv->bytes += wlen;

    SCReturnInt(TM_ECODE_OK);
}

/**
 * \brief Initialize the ERF receiver thread.
 *
 * \param initdata path of the ERF file to open (required).
 */
TmEcode ReceiveErfFileThreadInit(ThreadVars *tv, void *initdata, void **data)
{
    SCEnter();

    if (initdata == NULL) {
        SCLogError(SC_ERR_INVALID_ARGUMENT, "Error: No filename provided.");
        SCReturnInt(TM_ECODE_FAILED);
    }

    FILE *erf = fopen((const char *)initdata, "r");
    if (erf == NULL) {
        SCLogError(SC_ERR_FOPEN, "Failed to open %s: %s", (char *)initdata,
            strerror(errno));
        exit(EXIT_FAILURE);
    }

    ErfFileThreadVars *etv = SCMalloc(sizeof(ErfFileThreadVars));
    if (unlikely(etv == NULL)) {
        SCLogError(SC_ERR_MEM_ALLOC,
            "Failed to allocate memory for ERF file thread vars.");
        fclose(erf);
        SCReturnInt(TM_ECODE_FAILED);
    }
    memset(etv, 0, sizeof(*etv));
    etv->erf = erf;
    etv->tv = tv;
    *data = (void *)etv;

    SCLogInfo("Processing ERF file %s", (char *)initdata);

    SCReturnInt(TM_ECODE_OK);
}

/**
 * \brief Initialize the ERF decoder thread.
 */
TmEcode DecodeErfFileThreadInit(ThreadVars *tv, void *initdata, void **data)
{
    SCEnter();
    DecodeThreadVars *dtv = NULL;
    dtv = DecodeThreadVarsAlloc(tv);

    if (dtv == NULL)
        SCReturnInt(TM_ECODE_FAILED);

    DecodeRegisterPerfCounters(dtv, tv);

    *data = (void *)dtv;

    SCReturnInt(TM_ECODE_OK);
}

TmEcode DecodeErfFileThreadDeinit(ThreadVars *tv, void *data)
{
    if (data != NULL)
        DecodeThreadVarsFree(tv, data);
    SCReturnInt(TM_ECODE_OK);
}

/**
 * \brief Decode the ERF file.
 *
 * This function ups the decoder counters and then passes the packet
 * off to the ethernet decoder.
 */
TmEcode DecodeErfFile(ThreadVars *tv, Packet *p, void *data, PacketQueue *pq,
    PacketQueue *postpq)
{
    SCEnter();
    DecodeThreadVars *dtv = (DecodeThreadVars *)data;

    /* XXX HACK: flow timeout can call us for injected pseudo packets
     * see bug: https://redmine.openinfosecfoundation.org/issues/1107 */
    if (p->flags & PKT_PSEUDO_STREAM_END)
        return TM_ECODE_OK;

    /* Update counters. */
    DecodeUpdatePacketCounters(tv, dtv, p);

    DecodeEthernet(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p), pq);

    PacketDecodeFinalize(tv, dtv, p);

    SCReturnInt(TM_ECODE_OK);
}

/**
 * \brief Print some stats to the log at program exit.
 *
 * \param tv Pointer to ThreadVars.
 * \param data Pointer to data, ErfFileThreadVars.
 */
void ReceiveErfFileThreadExitStats(ThreadVars *tv, void *data)
{
    ErfFileThreadVars *etv = (ErfFileThreadVars *)data;

    SCLogInfo("Packets: %"PRIu32"; Bytes: %"PRIu64, etv->pkts, etv->bytes);
}
gpl-2.0
uucidl/hammer
src/t_grammar.c
7
1159
#include <glib.h>
#include "hammer.h"
#include "internal.h"
#include "cfgrammar.h"
#include "test_suite.h"

/* Grammar extracted from the trivial parser h_end_p(): exactly one
 * nonterminal, and nothing generates epsilon. */
static void test_end(void) {
  const HParser *p = h_end_p();
  HCFGrammar *g = h_cfgrammar(&system_allocator, p);
  g_check_hashtable_size(g->nts, 1);
  g_check_hashtable_size(g->geneps, 0);
  g_check_derives_epsilon_not(g, p);
}

/* Grammar analysis of p = (many('x') 'y') | end-of-input: checks
 * nonterminal extraction, epsilon derivation, and the computed
 * FIRST(1)/FOLLOW(1) sets. */
static void test_example_1(void) {
  HParser *c = h_many(h_ch('x'));
  HParser *q = h_sequence(c, h_ch('y'), NULL);
  HParser *p = h_choice(q, h_end_p(), NULL);
  HCFGrammar *g = h_cfgrammar(&system_allocator, p);

  g_check_nonterminal(g, c);
  g_check_nonterminal(g, q);
  g_check_nonterminal(g, p);

  /* many('x') can match zero 'x's, so only c derives epsilon */
  g_check_derives_epsilon(g, c);
  g_check_derives_epsilon_not(g, q);
  g_check_derives_epsilon_not(g, p);

  g_check_firstset_present(1, g, p, "$");
  g_check_firstset_present(1, g, p, "x");
  g_check_firstset_present(1, g, p, "y");

  g_check_followset_absent(1, g, c, "$");
  g_check_followset_absent(1, g, c, "x");
  g_check_followset_present(1, g, c, "y");
}

/* Register the grammar-analysis tests with the GLib test harness. */
void register_grammar_tests(void) {
  g_test_add_func("/core/grammar/end", test_end);
  g_test_add_func("/core/grammar/example_1", test_example_1);
}
gpl-2.0
XMelancholy/android_kernel_sony_u8500
sound/usb/card.c
7
20191
/* * (Tentative) USB Audio Driver for ALSA * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * Many codes borrowed from audio.c by * Alan Cox (alan@lxorguk.ukuu.org.uk) * Thomas Sailer (sailer@ife.ee.ethz.ch) * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * NOTES: * * - async unlink should be used for avoiding the sleep inside lock. * 2.4.22 usb-uhci seems buggy for async unlinking and results in * oops. in such a cse, pass async_unlink=0 option. * - the linked URBs would be preferred but not used so far because of * the instability of unlinking. * - type II is not supported properly. there is no device which supports * this type *correctly*. SB extigy looks as if it supports, but it's * indeed an AC3 stream packed in SPDIF frames (i.e. no real AC3 stream). 
*/ #include <linux/bitops.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/usb.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <linux/usb/audio.h> #include <linux/usb/audio-v2.h> #include <linux/module.h> #include <linux/switch.h> #include <sound/control.h> #include <sound/core.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include "usbaudio.h" #include "card.h" #include "midi.h" #include "mixer.h" #include "proc.h" #include "quirks.h" #include "endpoint.h" #include "helper.h" #include "debug.h" #include "pcm.h" #include "format.h" #include "power.h" #include "stream.h" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("USB Audio"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Generic,USB Audio}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card */ /* Vendor/product IDs for this card */ static int vid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 }; static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 }; static int nrpacks = 8; /* max. 
number of packets per urb */ static bool async_unlink = 1; static int device_setup[SNDRV_CARDS]; /* device parameter for this card */ static bool ignore_ctl_error; struct switch_dev switch_audio_detection; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the USB audio adapter."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the USB audio adapter."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable USB audio adapter."); module_param_array(vid, int, NULL, 0444); MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device."); module_param_array(pid, int, NULL, 0444); MODULE_PARM_DESC(pid, "Product ID for the USB audio device."); module_param(nrpacks, int, 0644); MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB."); module_param(async_unlink, bool, 0444); MODULE_PARM_DESC(async_unlink, "Use async unlink mode."); module_param_array(device_setup, int, NULL, 0444); MODULE_PARM_DESC(device_setup, "Specific device setup (if needed)."); module_param(ignore_ctl_error, bool, 0444); MODULE_PARM_DESC(ignore_ctl_error, "Ignore errors from USB controller for mixer interfaces."); /* * we keep the snd_usb_audio_t instances by ourselves for merging * the all interfaces on the same card as one sound device. 
*/ static DEFINE_MUTEX(register_mutex); static struct snd_usb_audio *usb_chip[SNDRV_CARDS]; static struct usb_driver usb_audio_driver; /* * disconnect streams * called from snd_usb_audio_disconnect() */ static void snd_usb_stream_disconnect(struct list_head *head) { int idx; struct snd_usb_stream *as; struct snd_usb_substream *subs; as = list_entry(head, struct snd_usb_stream, list); for (idx = 0; idx < 2; idx++) { subs = &as->substream[idx]; if (!subs->num_formats) continue; snd_usb_release_substream_urbs(subs, 1); subs->interface = -1; } } static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int interface) { struct usb_device *dev = chip->dev; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct usb_interface *iface = usb_ifnum_to_if(dev, interface); if (!iface) { snd_printk(KERN_ERR "%d:%u:%d : does not exist\n", dev->devnum, ctrlif, interface); return -EINVAL; } if (usb_interface_claimed(iface)) { snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n", dev->devnum, ctrlif, interface); return -EINVAL; } alts = &iface->altsetting[0]; altsd = get_iface_desc(alts); if ((altsd->bInterfaceClass == USB_CLASS_AUDIO || altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) && altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) { int err = snd_usbmidi_create(chip->card, iface, &chip->midi_list, NULL); if (err < 0) { snd_printk(KERN_ERR "%d:%u:%d: cannot create sequencer device\n", dev->devnum, ctrlif, interface); return -EINVAL; } usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); return 0; } if ((altsd->bInterfaceClass != USB_CLASS_AUDIO && altsd->bInterfaceClass != USB_CLASS_VENDOR_SPEC) || altsd->bInterfaceSubClass != USB_SUBCLASS_AUDIOSTREAMING) { snd_printdd(KERN_ERR "%d:%u:%d: skipping non-supported interface %d\n", dev->devnum, ctrlif, interface, altsd->bInterfaceClass); /* skip non-supported classes */ return -EINVAL; } if (snd_usb_get_speed(dev) == USB_SPEED_LOW) { snd_printk(KERN_ERR "low 
speed audio streaming not supported\n"); return -EINVAL; } if (! snd_usb_parse_audio_interface(chip, interface)) { usb_set_interface(dev, interface, 0); /* reset the current interface */ usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); return -EINVAL; } return 0; } /* * parse audio control descriptor and create pcm/midi streams */ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) { struct usb_device *dev = chip->dev; struct usb_host_interface *host_iface; struct usb_interface_descriptor *altsd; void *control_header; int i, protocol; /* find audiocontrol interface */ host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0]; control_header = snd_usb_find_csint_desc(host_iface->extra, host_iface->extralen, NULL, UAC_HEADER); altsd = get_iface_desc(host_iface); protocol = altsd->bInterfaceProtocol; if (!control_header) { snd_printk(KERN_ERR "cannot find UAC_HEADER\n"); return -EINVAL; } switch (protocol) { default: snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n", protocol); /* fall through */ case UAC_VERSION_1: { struct uac1_ac_header_descriptor *h1 = control_header; if (!h1->bInCollection) { snd_printk(KERN_INFO "skipping empty audio interface (v1)\n"); return -EINVAL; } if (h1->bLength < sizeof(*h1) + h1->bInCollection) { snd_printk(KERN_ERR "invalid UAC_HEADER (v1)\n"); return -EINVAL; } for (i = 0; i < h1->bInCollection; i++) snd_usb_create_stream(chip, ctrlif, h1->baInterfaceNr[i]); break; } case UAC_VERSION_2: { struct usb_interface_assoc_descriptor *assoc = usb_ifnum_to_if(dev, ctrlif)->intf_assoc; if (!assoc) { snd_printk(KERN_ERR "Audio class v2 interfaces need an interface association\n"); return -EINVAL; } for (i = 0; i < assoc->bInterfaceCount; i++) { int intf = assoc->bFirstInterface + i; if (intf != ctrlif) snd_usb_create_stream(chip, ctrlif, intf); } break; } } return 0; } /* * free the chip instance * * here we have to do not much, since pcm and controls are already freed * */ static 
int snd_usb_audio_free(struct snd_usb_audio *chip) { kfree(chip); return 0; } static int snd_usb_audio_dev_free(struct snd_device *device) { struct snd_usb_audio *chip = device->device_data; return snd_usb_audio_free(chip); } static void remove_trailing_spaces(char *str) { char *p; if (!*str) return; for (p = str + strlen(str) - 1; p >= str && isspace(*p); p--) *p = 0; } /* * create a chip instance and set its names. */ static int snd_usb_audio_create(struct usb_device *dev, int idx, const struct snd_usb_audio_quirk *quirk, struct snd_usb_audio **rchip) { struct snd_card *card; struct snd_usb_audio *chip; int err, len; char component[14]; static struct snd_device_ops ops = { .dev_free = snd_usb_audio_dev_free, }; *rchip = NULL; switch (snd_usb_get_speed(dev)) { case USB_SPEED_LOW: case USB_SPEED_FULL: case USB_SPEED_HIGH: case USB_SPEED_SUPER: break; default: snd_printk(KERN_ERR "unknown device speed %d\n", snd_usb_get_speed(dev)); return -ENXIO; } err = snd_card_create(index[idx], id[idx], THIS_MODULE, 0, &card); if (err < 0) { snd_printk(KERN_ERR "cannot create card instance %d\n", idx); return err; } chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (! 
chip) { snd_card_free(card); return -ENOMEM; } mutex_init(&chip->shutdown_mutex); chip->index = idx; chip->dev = dev; chip->card = card; chip->setup = device_setup[idx]; chip->nrpacks = nrpacks; chip->async_unlink = async_unlink; chip->probing = 1; chip->usb_id = USB_ID(le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); INIT_LIST_HEAD(&chip->pcm_list); INIT_LIST_HEAD(&chip->midi_list); INIT_LIST_HEAD(&chip->mixer_list); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_usb_audio_free(chip); snd_card_free(card); return err; } strcpy(card->driver, "USB-Audio"); sprintf(component, "USB%04x:%04x", USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id)); snd_component_add(card, component); /* retrieve the device string as shortname */ if (quirk && quirk->product_name && *quirk->product_name) { strlcpy(card->shortname, quirk->product_name, sizeof(card->shortname)); } else { if (!dev->descriptor.iProduct || usb_string(dev, dev->descriptor.iProduct, card->shortname, sizeof(card->shortname)) <= 0) { /* no name available from anywhere, so use ID */ sprintf(card->shortname, "USB Device %#04x:%#04x", USB_ID_VENDOR(chip->usb_id), USB_ID_PRODUCT(chip->usb_id)); } } remove_trailing_spaces(card->shortname); /* retrieve the vendor and device strings as longname */ if (quirk && quirk->vendor_name && *quirk->vendor_name) { len = strlcpy(card->longname, quirk->vendor_name, sizeof(card->longname)); } else { if (dev->descriptor.iManufacturer) len = usb_string(dev, dev->descriptor.iManufacturer, card->longname, sizeof(card->longname)); else len = 0; /* we don't really care if there isn't any vendor string */ } if (len > 0) { remove_trailing_spaces(card->longname); if (*card->longname) strlcat(card->longname, " ", sizeof(card->longname)); } strlcat(card->longname, card->shortname, sizeof(card->longname)); len = strlcat(card->longname, " at ", sizeof(card->longname)); if (len < sizeof(card->longname)) usb_make_path(dev, 
card->longname + len, sizeof(card->longname) - len); switch (snd_usb_get_speed(dev)) { case USB_SPEED_LOW: strlcat(card->longname, ", low speed", sizeof(card->longname)); break; case USB_SPEED_FULL: strlcat(card->longname, ", full speed", sizeof(card->longname)); break; case USB_SPEED_HIGH: strlcat(card->longname, ", high speed", sizeof(card->longname)); break; case USB_SPEED_SUPER: strlcat(card->longname, ", super speed", sizeof(card->longname)); break; default: break; } snd_usb_audio_create_proc(chip); *rchip = chip; return 0; } /* * probe the active usb device * * note that this can be called multiple times per a device, when it * includes multiple audio control interfaces. * * thus we check the usb device pointer and creates the card instance * only at the first time. the successive calls of this function will * append the pcm interface to the corresponding card. */ static struct snd_usb_audio * snd_usb_audio_probe(struct usb_device *dev, struct usb_interface *intf, const struct usb_device_id *usb_id) { const struct snd_usb_audio_quirk *quirk = (const struct snd_usb_audio_quirk *)usb_id->driver_info; int i, err; struct snd_usb_audio *chip; struct usb_host_interface *alts; int ifnum; u32 id; struct usb_device *snd_dev; alts = &intf->altsetting[0]; ifnum = get_iface_desc(alts)->bInterfaceNumber; id = USB_ID(le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct)); if (quirk && quirk->ifnum >= 0 && ifnum != quirk->ifnum) goto __err_val; if (snd_usb_apply_boot_quirk(dev, intf, quirk) < 0) goto __err_val; /* * found a config. now register to ALSA */ /* check whether it's already registered */ chip = NULL; mutex_lock(&register_mutex); for (i = 0; i < SNDRV_CARDS; i++) { if (usb_chip[i] && usb_chip[i]->dev == dev) { if (usb_chip[i]->shutdown) { snd_printk(KERN_ERR "USB device is in the shutdown state, cannot create a card instance\n"); goto __error; } chip = usb_chip[i]; chip->probing = 1; break; } } if (! chip) { /* it's a fresh one. 
* now look for an empty slot and create a new card instance */ for (i = 0; i < SNDRV_CARDS; i++) if (enable[i] && ! usb_chip[i] && (vid[i] == -1 || vid[i] == USB_ID_VENDOR(id)) && (pid[i] == -1 || pid[i] == USB_ID_PRODUCT(id))) { if (snd_usb_audio_create(dev, i, quirk, &chip) < 0) { goto __error; } snd_card_set_dev(chip->card, &intf->dev); chip->pm_intf = intf; break; } if (!chip) { printk(KERN_ERR "no available usb audio device\n"); goto __error; } } /* * For devices with more than one control interface, we assume the * first contains the audio controls. We might need a more specific * check here in the future. */ if (!chip->ctrl_intf) chip->ctrl_intf = alts; chip->txfr_quirk = 0; err = 1; /* continue */ if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) { /* need some special handlings */ if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0) goto __error; } if (err > 0) { /* create normal USB audio interfaces */ if (snd_usb_create_streams(chip, ifnum) < 0 || snd_usb_create_mixer(chip, ifnum, ignore_ctl_error) < 0) { goto __error; } } /* we are allowed to call snd_card_register() many times */ if (snd_card_register(chip->card) < 0) { goto __error; } usb_chip[chip->index] = chip; chip->num_interfaces++; chip->probing = 0; snd_dev = interface_to_usbdev(intf); usb_enable_autosuspend(snd_dev); mutex_unlock(&register_mutex); return chip; __error: if (chip) { if (!chip->num_interfaces) snd_card_free(chip->card); chip->probing = 0; } mutex_unlock(&register_mutex); __err_val: return NULL; } /* * we need to take care of counter, since disconnection can be called also * many times as well as usb_audio_probe(). 
*/ static void snd_usb_audio_disconnect(struct usb_device *dev, struct snd_usb_audio *chip) { struct snd_card *card; struct list_head *p; if (chip == (void *)-1L) return; card = chip->card; mutex_lock(&register_mutex); mutex_lock(&chip->shutdown_mutex); chip->shutdown = 1; chip->num_interfaces--; if (chip->num_interfaces <= 0) { snd_card_disconnect(card); /* release the pcm resources */ list_for_each(p, &chip->pcm_list) { snd_usb_stream_disconnect(p); } /* release the midi resources */ list_for_each(p, &chip->midi_list) { snd_usbmidi_disconnect(p); } /* release mixer resources */ list_for_each(p, &chip->mixer_list) { snd_usb_mixer_disconnect(p); } usb_chip[chip->index] = NULL; mutex_unlock(&chip->shutdown_mutex); mutex_unlock(&register_mutex); snd_card_free_when_closed(card); } else { mutex_unlock(&chip->shutdown_mutex); mutex_unlock(&register_mutex); } } /* * new 2.5 USB kernel API */ static int usb_audio_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct snd_usb_audio *chip; chip = snd_usb_audio_probe(interface_to_usbdev(intf), intf, id); if (chip) { usb_set_intfdata(intf, chip); switch_set_state(&switch_audio_detection, 1); return 0; } else return -EIO; } static void usb_audio_disconnect(struct usb_interface *intf) { switch_set_state(&switch_audio_detection, 0); snd_usb_audio_disconnect(interface_to_usbdev(intf), usb_get_intfdata(intf)); } #ifdef CONFIG_PM int snd_usb_autoresume(struct snd_usb_audio *chip) { int err = -ENODEV; if (!chip->shutdown && !chip->probing) err = usb_autopm_get_interface(chip->pm_intf); return err; } void snd_usb_autosuspend(struct snd_usb_audio *chip) { if (!chip->shutdown && !chip->probing) usb_autopm_put_interface(chip->pm_intf); } static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) { struct snd_usb_audio *chip = usb_get_intfdata(intf); struct list_head *p; struct snd_usb_stream *as; struct usb_mixer_interface *mixer; if (chip == (void *)-1L) return 0; if (!PMSG_IS_AUTO(message)) { 
snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); if (!chip->num_suspended_intf++) { list_for_each(p, &chip->pcm_list) { as = list_entry(p, struct snd_usb_stream, list); snd_pcm_suspend_all(as->pcm); } } } else { /* * otherwise we keep the rest of the system in the dark * to keep this transparent */ if (!chip->num_suspended_intf++) chip->autosuspended = 1; } list_for_each_entry(mixer, &chip->mixer_list, list) snd_usb_mixer_inactivate(mixer); return 0; } static int usb_audio_resume(struct usb_interface *intf) { struct snd_usb_audio *chip = usb_get_intfdata(intf); struct usb_mixer_interface *mixer; int err = 0; if (chip == (void *)-1L) return 0; if (--chip->num_suspended_intf) return 0; /* * ALSA leaves material resumption to user space * we just notify and restart the mixers */ list_for_each_entry(mixer, &chip->mixer_list, list) { err = snd_usb_mixer_activate(mixer); if (err < 0) goto err_out; } if (!chip->autosuspended) snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); chip->autosuspended = 0; err_out: return err; } #else #define usb_audio_suspend NULL #define usb_audio_resume NULL #endif /* CONFIG_PM */ static struct usb_device_id usb_audio_ids [] = { #include "quirks-table.h" { .match_flags = (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS), .bInterfaceClass = USB_CLASS_AUDIO, .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, usb_audio_ids); /* * entry point for linux usb interface */ static struct usb_driver usb_audio_driver = { .name = "snd-usb-audio", .probe = usb_audio_probe, .disconnect = usb_audio_disconnect, .suspend = usb_audio_suspend, .resume = usb_audio_resume, .id_table = usb_audio_ids, .supports_autosuspend = 1, }; static int __init snd_usb_audio_init(void) { if (nrpacks < 1 || nrpacks > MAX_PACKS) { printk(KERN_WARNING "invalid nrpacks value.\n"); return -EINVAL; } switch_audio_detection.name = "usb_audio"; switch_dev_register(&switch_audio_detection); return 
usb_register(&usb_audio_driver); } static void __exit snd_usb_audio_cleanup(void) { switch_dev_unregister(&switch_audio_detection); usb_deregister(&usb_audio_driver); } module_init(snd_usb_audio_init); module_exit(snd_usb_audio_cleanup);
gpl-2.0
ppriest/mame
src/devices/cpu/spc700/spc700.cpp
7
72250
// license:BSD-3-Clause // copyright-holders:R. Belmont, Karl Stenerud /* ======================================================================== */ /* =============================== COPYRIGHT ============================== */ /* ======================================================================== */ /* Sony/Nintendo SPC700 CPU Emulator The SPC700 is 6502-based at heart but contains a lot of the extended opcodes of the Mitsubishi 770 and 7700 series 65xxx-based MCUs, plus a few special twists borrowed from the 68000. It was designed by Sony's Ken Kutaragi, later the "father of the PlayStation". Original emulation by Anthony Kruize and Lee Hammerton. Substantially revised by R. Belmont. Thanks to Anonymous, TRAC, Brad Martin, anomie, Blargg, and everyone else on ZSNES Technical for probing the darker corners of the SNES with test programs so we have a chance at getting things accurate. MESS Bugzilla bugs: - 804 ADC sets carry too late (FIXED) - 805 ADDW/SUBW set V wrongly (FIXED) - 806 BRK should modify PSW (FIXED) - 807 DAA/DAS problem (FIXED) */ /* ======================================================================== */ /* ================================= NOTES ================================ */ /* ======================================================================== */ /* snes mapped ports: f0-ff Address Function Register R/W When Reset Remarks 00F0H (test) --- ------ Installed in sound-CPU 00F1H Control W Control = "00-000" 00F2H Register Add. 
R/W Indeterminate Installed in DSP 00F3H Register Data R/W Indeterminate Installed in DSP 00F4H Port-0 R/W Port0r = "00" Installed in sound-CPU Port0w = "00" 00F5H Port-1 R/W Port1r = "00" Installed in sound-CPU Port1w = "00" 00F6H Port-2 R/W Port2r = "00" Installed in sound-CPU Port2w = "00" 00F7H Port-3 R/W Port3r = "00" Installed in sound-CPU Port3w = "00" 00F8H ------ --- ---------- ------------------- 00F9H ------ --- ---------- ------------------- 00FAH Timer-0 W Indeterminate Installed in sound-CPU 00FBH Timer-1 W Indeterminate Installed in sound-CPU 00FCH Timer-2 W Indeterminate Installed in sound-CPU 00FDH Counter-0 W Indeterminate Installed in sound-CPU 00FEH Counter-1 W Indeterminate Installed in sound-CPU 00FFH Counter-2 W Indeterminate Installed in sound-CPU */ /* ======================================================================== */ /* ================================ INCLUDES ============================== */ /* ======================================================================== */ #include <limits.h> #include "emu.h" #include "debugger.h" #include "spc700.h" /* ======================================================================== */ /* ==================== ARCHITECTURE-DEPENDANT DEFINES ==================== */ /* ======================================================================== */ #undef int8 /* Allow for architectures that don't have 8-bit sizes */ #if UCHAR_MAX == 0xff #define int8 char #define MAKE_INT_8(A) (int8)((A)&0xff) #else #define int8 int static inline int MAKE_INT_8(int A) {return (A & 0x80) ? 
A | ~0xff : A & 0xff;} #endif /* UCHAR_MAX == 0xff */ #define MAKE_UINT_8(A) ((A)&0xff) #define MAKE_UINT_16(A) ((A)&0xffff) /* ======================================================================== */ /* ============================ GENERAL DEFINES =========================== */ /* ======================================================================== */ /* Bits */ #define BIT_0 0x01 #define BIT_1 0x02 #define BIT_2 0x04 #define BIT_3 0x08 #define BIT_4 0x10 #define BIT_5 0x20 #define BIT_6 0x40 #define BIT_7 0x80 #define BIT_8 0x100 /* Flag positions in Processor Status Register */ #define FLAGPOS_N BIT_7 /* Negative */ #define FLAGPOS_V BIT_6 /* Overflow */ #define FLAGPOS_P BIT_5 /* Direct Page Selector */ #define FLAGPOS_B BIT_4 /* Break */ #define FLAGPOS_H BIT_3 /* Half-carry */ #define FLAGPOS_I BIT_2 /* Interrupt */ #define FLAGPOS_Z BIT_1 /* Zero */ #define FLAGPOS_C BIT_0 /* Carry */ #define NFLAG_SET FLAGPOS_N #define VFLAG_SET BIT_7 #define PFLAG_SET BIT_8 #define BFLAG_SET FLAGPOS_B #define HFLAG_SET BIT_3 #define IFLAG_SET FLAGPOS_I #define ZFLAG_SET 0 #define CFLAG_SET BIT_8 #define NZFLAG_CLEAR 1 #define VFLAG_CLEAR 0 #define PFLAG_CLEAR 0 #define BFLAG_CLEAR 0 #define HFLAG_CLEAR 0 #define IFLAG_CLEAR 0 #define CFLAG_CLEAR 0 #define NMI_SET 1 #define NMI_CLEAR 0 #define IRQ_SET IFLAG_CLEAR #define IRQ_CLEAR IFLAG_SET #define STACK_PAGE 0x100 /* Stack Page Offset */ #define VECTOR_RST 0xfffe /* Reset */ #define VECTOR_BRK 0xffde /* Break Instruction */ #define VECTOR_IRQ 0xfffc /* IRQ ??? what is real vector? */ #define VECTOR_NMI 0xfffa /* NMI ??? what is real vector? 
*/ #define REG_A m_a /* Accumulator */ #define REG_X m_x /* Index X Register */ #define REG_Y m_y /* Index Y Register */ #define REG_S m_s /* Stack Pointer */ #define REG_PC m_pc /* Program Counter */ #define REG_PPC m_ppc /* Previous Program Counter */ #define REG_P m_p /* Processor Status Register */ #define FLAG_NZ m_flag_n = m_flag_z /* Negative Flag and inverted Zero flag */ #define FLAG_N m_flag_n /* Negative flag */ #define FLAG_Z m_flag_z /* Inverted Zero flag */ #define FLAG_V m_flag_v /* Overflow Flag */ #define FLAG_P m_flag_p /* Direct Page Flag */ #define FLAG_B m_flag_b /* BRK Instruction Flag */ #define FLAG_H m_flag_h /* Decimal Mode Flag */ #define FLAG_I m_flag_i /* Interrupt Mask Flag */ #define FLAG_C m_flag_c /* Carry Flag */ #define LINE_IRQ m_line_irq /* Status of the IRQ line */ #define LINE_NMI m_line_nmi /* Status of the NMI line */ #define REG_IR m_ir /* Instruction Register */ #define CLOCKS m_ICount /* Clock cycles remaining */ #define CPU_STOPPED m_stopped /* Stopped status */ #define SRC m_source /* Source Operand */ #define DST m_destination /* Destination Operand */ #define TMP1 m_temp1 /* temporary result 1 */ #define TMP2 m_temp2 /* temporary result 2 */ #define TMP3 m_temp3 /* temporary result 3 */ #define STOP_LEVEL_STOP 1 #define STOP_LEVEL_SLEEP 2 /* ======================================================================== */ /* ============================ GENERAL MACROS ============================ */ /* ======================================================================== */ /* Codition code tests */ #define COND_CC() (!(FLAG_C&0x100)) /* Carry Clear */ #define COND_CS() (FLAG_C&0x100) /* Carry Set */ #define COND_EQ() (!FLAG_Z) /* Equal */ #define COND_NE() (FLAG_Z) /* Not Equal */ #define COND_MI() (FLAG_N&0x80) /* Minus */ #define COND_PL() (!(FLAG_N&0x80)) /* Plus */ #define COND_VC() (!(FLAG_V&0x80)) /* Overflow Clear */ #define COND_VS() (FLAG_V&0x80) /* Overflow Set */ /* Set Overflow flag in math operations */ 
#define VFLAG_ADD_8(S, D, R) ((S^R) & (D^R)) #define VFLAG_ADD_16(S, D, R) (((S^R) & (D^R))>>8) #define VFLAG_SUB_8(S, D, R) ((S^D) & (R^D)) #define VFLAG_SUB_16(S, D, R) (((S^D) & (R^D))>>8) #define CFLAG_AS_1() ((FLAG_C>>8)&1) #define CFLAG_AS_NOT_1() (!(FLAG_C&CFLAG_SET)) #define NZFLAG_16(A) (((A)&0x7f) | (((A)>>1)&0x40) | (((A)>>8)&0xff)) #define CFLAG_16(A) ((A)>>8) /* ======================================================================== */ /* ================================= MAME ================================= */ /* ======================================================================== */ #define spc700_read_8(addr) m_program->read_byte(addr) #define spc700_write_8(addr,data) m_program->write_byte(addr,data) #define spc700_read_8_direct(A) spc700_read_8(A) #define spc700_write_8_direct(A, V) spc700_write_8(A, V) //#define spc700_read_instruction(A) memory_decrypted_read_byte(m_program,A) //#define spc700_read_8_immediate(A) memory_raw_read_byte(m_program,A) #define spc700_read_instruction(A) m_program->read_byte(A) #define spc700_read_8_immediate(A) m_program->read_byte(A) #define spc700_jumping(A) #define spc700_branching(A) const device_type SPC700 = &device_creator<spc700_device>; spc700_device::spc700_device(const machine_config &mconfig, const char *tag, device_t *owner, UINT32 clock) : cpu_device(mconfig, SPC700, "SPC700", tag, owner, clock, "spc700", __FILE__) , m_program_config("program", ENDIANNESS_LITTLE, 8, 16, 0) , m_a(0) , m_x(0) , m_y(0) , m_s(0) , m_pc(0) , m_ppc(0) , m_debugger_temp(0) { } /* ======================================================================== */ /* ============================ UTILITY MACROS ============================ */ /* ======================================================================== */ /* Use up clock cycles */ #define CLK(A) CLOCKS -= (A) #define CLK_ALL() CLOCKS = 0 UINT32 spc700_device::read_8_normal(UINT32 address) { address = MAKE_UINT_16(address); return spc700_read_8(address); } UINT32 
spc700_device::read_8_immediate(UINT32 address) { address = MAKE_UINT_16(address); return spc700_read_8_immediate(address); } UINT32 spc700_device::read_8_instruction(UINT32 address) { address = MAKE_UINT_16(address); return spc700_read_instruction(address); } UINT32 spc700_device::read_8_direct(UINT32 address) { address = MAKE_UINT_8(address) | FLAG_P; return spc700_read_8_direct(address); } void spc700_device::write_8_normal(UINT32 address, UINT32 value) { address = MAKE_UINT_16(address); value = MAKE_UINT_8(value); spc700_write_8(address, value); } void spc700_device::write_8_direct(UINT32 address, UINT32 value) { address = MAKE_UINT_8(address) | FLAG_P; value = MAKE_UINT_8(value); spc700_write_8_direct(address, value); } UINT32 spc700_device::read_16_normal(UINT32 address) { return read_8_normal(address) | (read_8_normal(address+1)<<8); } UINT32 spc700_device::read_16_immediate(UINT32 address) { return read_8_immediate(address) | (read_8_immediate(address+1)<<8); } UINT32 spc700_device::read_16_direct(UINT32 address) { return read_8_direct(address) | (read_8_direct(address+1)<<8); } void spc700_device::write_16_direct(UINT32 address, UINT32 value) { write_8_direct(address, value); write_8_direct(address+1, value>>8); } /* Low level memory access macros */ #define read_8_NORM(A) read_8_normal(A) #define read_8_IMM(A) read_8_immediate(A) #define read_8_ABS(A) read_8_normal(A) #define read_8_ABX(A) read_8_normal(A) #define read_8_ABY(A) read_8_normal(A) #define read_8_AXI(A) read_8_normal(A) #define read_8_DP(A) read_8_direct(A) #define read_8_DPX(A) read_8_direct(A) #define read_8_DPY(A) read_8_direct(A) #define read_8_DPI(A) read_8_normal(A) #define read_8_DXI(A) read_8_normal(A) #define read_8_DIY(A) read_8_normal(A) #define read_8_STK(A) read_8_normal(A) #define read_8_XI(A) read_8_direct(A) #define read_8_XII(A) read_8_direct(A) #define read_8_YI(A) read_8_direct(A) #define read_16_NORM(A) read_16_normal(A) #define read_16_IMM(A) read_16_immediate(A) #define 
read_16_ABS(A) read_16_absolute(A) #define read_16_ABX(A) read_16_normal(A) #define read_16_DP(A) read_16_direct(A) #define read_16_DPX(A) read_16_direct(A) #define read_16_DPY(A) read_16_direct(A) #define read_16_DPI(A) read_16_normal(A) #define read_16_VEC(A) read_16_normal(A) #define read_16_XI(A) read_16_direct(A) #define read_16_XII(A) read_16_direct(A) #define read_16_YI(A) read_16_direct(A) #define write_8_NORM(A, V) write_8_normal(A, V) #define write_8_IMM(A, V) write_8_normal(A, V) #define write_8_ABS(A, V) write_8_normal(A, V) #define write_8_ABX(A, V) write_8_normal(A, V) #define write_8_ABY(A, V) write_8_normal(A, V) #define write_8_AXI(A, V) write_8_normal(A, V) #define write_8_DP(A, V) write_8_direct(A, V) #define write_8_DPX(A, V) write_8_direct(A, V) #define write_8_DPY(A, V) write_8_direct(A, V) #define write_8_DPI(A, V) write_8_normal(A, V) #define write_8_DXI(A, V) write_8_normal(A, V) #define write_8_DIY(A, V) write_8_normal(A, V) #define write_8_STK(A, V) write_8_normal(A, V) #define write_8_XI(A, V) write_8_direct(A, V) #define write_8_XII(A, V) write_8_direct(A, V) #define write_8_YI(A, V) write_8_direct(A, V) #define write_16_NORM(A, V) write_16_normal(A, V) #define write_16_ABS(A, V) write_16_normal(A, V) #define write_16_ABX(A, V) write_16_normal(A, V) #define write_16_ABY(A, V) write_16_normal(A, V) #define write_16_AXI(A, V) write_16_normal(A, V) #define write_16_DP(A, V) write_16_direct(A, V) #define write_16_DPX(A, V) write_16_direct(A, V) #define write_16_DPY(A, V) write_16_direct(A, V) #define write_16_DPI(A, V) write_16_normal(A, V) #define write_16_DXI(A, V) write_16_normal(A, V) #define write_16_DIY(A, V) write_16_normal(A, V) #define write_16_STK(A, V) write_16_normal(A, V) #define write_16_XI(A, V) write_16_direct(A, V) #define write_16_XII(A, V) write_16_direct(A, V) #define write_16_YI(A, V) write_16_direct(A, V) #define OPER_8_IMM() read_8_IMM(EA_IMM()) #define OPER_8_ABS() read_8_ABS(EA_ABS()) #define OPER_8_ABX() 
read_8_ABX(EA_ABX()) #define OPER_8_ABY() read_8_ABY(EA_ABY()) #define OPER_8_AXI() read_8_IND(EA_IND()) #define OPER_8_DP() read_8_DP(EA_DP()) #define OPER_8_DPX() read_8_DPX(EA_DPX()) #define OPER_8_DPY() read_8_DPY(EA_DPY()) #define OPER_8_DPI() read_8_DPI(EA_DPI()) #define OPER_8_DXI() read_8_DXI(EA_DXI()) #define OPER_8_DIY() read_8_DIY(EA_DIY()) #define OPER_8_XI() read_8_XI(EA_XI()) #define OPER_8_XII() read_8_XI(EA_XII()) #define OPER_8_YI() read_8_YI(EA_YI()) #define OPER_16_IMM() read_16_IMM(EA_IMM16()) #define OPER_16_ABS() read_16_ABS(EA_ABS()) #define OPER_16_ABX() read_16_ABX(EA_ABX()) #define OPER_16_ABY() read_16_ABY(EA_ABY()) #define OPER_16_AXI() read_16_IND(EA_IND()) #define OPER_16_DP() read_16_DP(EA_DP()) #define OPER_16_DPX() read_16_DPX(EA_DPX()) #define OPER_16_DPY() read_16_DPY(EA_DPY()) #define OPER_16_DPI() read_16_DPI(EA_DXI()) #define OPER_16_DXI() read_16_DXI(EA_DXI()) #define OPER_16_DIY() read_16_DIY(EA_DIY()) #define OPER_16_XI() read_16_XI(EA_XI()) #define OPER_16_XII() read_16_XI(EA_XII()) #define OPER_16_YI() read_16_YI(EA_YI()) /* Effective Address Calculations */ UINT32 spc700_device::EA_IMM() {return REG_PC++;} UINT32 spc700_device::EA_IMM16() {REG_PC += 2; return REG_PC-2;} UINT32 spc700_device::EA_ABS() {return OPER_16_IMM();} UINT32 spc700_device::EA_ABX() {return EA_ABS() + REG_X;} UINT32 spc700_device::EA_ABY() {return EA_ABS() + REG_Y;} UINT32 spc700_device::EA_AXI() {return OPER_16_ABX();} UINT32 spc700_device::EA_DP() {return OPER_8_IMM();} UINT32 spc700_device::EA_DPX() {return (EA_DP() + REG_X)&0xff;} UINT32 spc700_device::EA_DPY() {return (EA_DP() + REG_Y)&0xff;} UINT32 spc700_device::EA_DXI() {return OPER_16_DPX();} UINT32 spc700_device::EA_DIY() {UINT32 addr = OPER_16_DP(); if((addr&0xff00) != ((addr+REG_Y)&0xff00)) CLK(1); return addr + REG_Y;} UINT32 spc700_device::EA_XI() {return REG_X;} UINT32 spc700_device::EA_XII() {UINT32 val = REG_X;REG_X = MAKE_UINT_8(REG_X+1);return val;} UINT32 spc700_device::EA_YI() 
{return REG_Y;} /* Change the Program Counter */ void spc700_device::JUMP(UINT32 address) { REG_PC = address; spc700_jumping(REG_PC); } void spc700_device::BRANCH(UINT32 offset) { REG_PC = MAKE_UINT_16(REG_PC + MAKE_INT_8(offset)); spc700_branching(REG_PC); } #define GET_REG_YA() (REG_A | (REG_Y<<8)) void spc700_device::SET_REG_YA(UINT32 value) { REG_A = MAKE_UINT_8(value); REG_Y = MAKE_UINT_8(value>>8); } /* Get the Processor Status Register */ #define GET_REG_P() \ ((FLAG_N & 0x80) | \ ((FLAG_V & 0x80) >> 1) | \ (FLAG_P>>3) | \ FLAG_B | \ (FLAG_H& HFLAG_SET) | \ FLAG_I | \ ((!FLAG_Z) << 1) | \ CFLAG_AS_1()) /* Set the Process Status Register */ void spc700_device::SET_REG_P(UINT32 value) { FLAG_N = (value & 0x80); FLAG_Z = !(value & 2); FLAG_V = value<<1; FLAG_P = (value & FLAGPOS_P) << 3; FLAG_B = value & FLAGPOS_B; FLAG_H = value & HFLAG_SET; FLAG_C = value << 8; SET_FLAG_I(value); } /* Push/Pull data to/from the stack */ void spc700_device::PUSH_8(UINT32 value) { write_8_STK(REG_S+STACK_PAGE, value); REG_S = MAKE_UINT_8(REG_S - 1); } UINT32 spc700_device::PULL_8() { REG_S = MAKE_UINT_8(REG_S + 1); return read_8_STK(REG_S+STACK_PAGE); } void spc700_device::PUSH_16(UINT32 value) { PUSH_8(value>>8); PUSH_8(value); } UINT32 spc700_device::PULL_16() { UINT32 value = PULL_8(); return value | (PULL_8()<<8); } void spc700_device::CHECK_IRQ() { if(FLAG_I & LINE_IRQ) SERVICE_IRQ(); } void spc700_device::SERVICE_IRQ() { fatalerror("spc700: SERVICE_IRQ() not implemented yet!\n"); } void spc700_device::SET_FLAG_I(UINT32 value) { FLAG_I = value & IFLAG_SET; #if !SPC700_OPTIMIZE_SNES CHECK_IRQ(); #endif } /* ======================================================================== */ /* =========================== OPERATION MACROS =========================== */ /* ======================================================================== */ #define SUBOP_ADC(A, B) \ m_spc_int16 = (A) + (B) + CFLAG_AS_1(); \ TMP1 = ((A) & 0x0f) + (CFLAG_AS_1()); \ FLAG_C = (m_spc_int16 > 0xff) ? 
CFLAG_SET : 0; \ FLAG_V = (~((A) ^ (B))) & (((A) ^ m_spc_int16) & 0x80); \ FLAG_H = (((m_spc_int16 & 0x0f) - TMP1) & 0x10) >> 1; \ FLAG_NZ = (UINT8)m_spc_int16 /* Add With Carry */ #define OP_ADC(BCLK, MODE) \ CLK(BCLK); \ SRC = OPER_8_##MODE(); \ SUBOP_ADC(SRC, REG_A); \ REG_A = (UINT8)m_spc_int16; /* Add With Carry to memory */ #define OP_ADCM(BCLK, SMODE, DMODE) \ CLK(BCLK); \ SRC = OPER_8_##SMODE(); \ DST = EA_##DMODE(); \ SUBOP_ADC(SRC, read_8_##DMODE(DST)); \ write_8_##DMODE(DST, (UINT8)m_spc_int16) /* Add word */ #define OP_ADDW(BCLK) \ CLK(BCLK); \ SRC = OPER_16_DP(); \ DST = GET_REG_YA(); \ TMP1 = ((SRC) & 0xff) + ((DST) & 0xff); \ TMP2 = (TMP1 > 0xff) ? 1 : 0; \ TMP3 = ((SRC) >> 8) + ((DST) >> 8) + TMP2; \ m_spc_int16 = ((TMP1 & 0xff) + (TMP3 << 8)) & 0xffff; \ FLAG_C = (TMP3 > 0xff) ? CFLAG_SET : 0; \ FLAG_H = ((unsigned) ((((DST) >> 8) & 0x0F) + \ (((SRC) >> 8) & 0x0F) + TMP2)) > 0x0F ? HFLAG_SET : 0; \ FLAG_V = (~((DST) ^ (SRC)) & ((SRC) ^ (UINT16) m_spc_int16) & 0x8000) ? VFLAG_SET : 0; \ FLAG_Z = (m_spc_int16 != 0); \ FLAG_N = (m_spc_int16>>8); \ SET_REG_YA(m_spc_int16); /* Logical AND with accumulator */ #define OP_AND(BCLK, MODE) \ CLK(BCLK); \ FLAG_NZ = REG_A &= OPER_8_##MODE() /* Logical AND operand */ #define OP_ANDM(BCLK, SMODE, DMODE) \ CLK(BCLK); \ FLAG_NZ = OPER_8_##SMODE(); \ DST = EA_##DMODE(); \ FLAG_NZ &= read_8_##DMODE(DST); \ write_8_##DMODE(DST, FLAG_NZ) /* Logical AND bit to C */ #define OP_AND1(BCLK) \ CLK(BCLK); \ DST = EA_IMM16(); \ if(FLAG_C & CFLAG_SET) \ { \ DST = read_16_IMM(DST); \ SRC = 1 << (DST >> 13); \ DST &= 0x1fff; \ if(!(read_8_NORM(DST) & SRC)) \ FLAG_C = CFLAG_CLEAR; \ } /* AND negated bit to C */ #define OP_ANDN1(BCLK) \ CLK(BCLK); \ DST = EA_IMM16(); \ if(FLAG_C & CFLAG_SET) \ { \ DST = read_16_IMM(DST); \ SRC = 1 << (DST >> 13); \ DST &= 0x1fff; \ if(read_8_NORM(DST) & SRC) \ FLAG_C = CFLAG_CLEAR; \ } /* Arithmetic Shift Left accumulator */ #define OP_ASL(BCLK) \ CLK(BCLK); \ FLAG_C = REG_A << 1; \ FLAG_NZ = 
REG_A = MAKE_UINT_8(FLAG_C) /* Arithmetic Shift Left operand */ #define OP_ASLM(BCLK, MODE) \ CLK(BCLK); \ DST = EA_##MODE(); \ FLAG_C = read_8_##MODE(DST) << 1; \ FLAG_NZ = MAKE_UINT_8(FLAG_C); \ write_8_##MODE(DST, FLAG_NZ) /* Branch if Bit Reset */ #define OP_BBC(BCLK, BIT) \ CLK(BCLK); \ SRC = OPER_8_DP(); \ DST = OPER_8_IMM(); \ if(!(SRC & BIT)) \ { \ CLK(2); \ BRANCH(DST); \ } /* Branch if Bit Set */ #define OP_BBS(BCLK, BIT) \ CLK(BCLK); \ SRC = OPER_8_DP(); \ DST = OPER_8_IMM(); \ if(SRC & BIT) \ { \ CLK(2); \ BRANCH(DST); \ } /* Branch on Condition Code */ #define OP_BCC(BCLK, COND) \ CLK(BCLK); \ DST = OPER_8_IMM(); \ if(COND) \ { \ CLK(2); \ BRANCH(DST); \ } /* Branch Unconditional */ /* speed up busy loops */ #define OP_BRA(BCLK) \ CLK(BCLK); \ BRANCH(OPER_8_IMM()); \ if(REG_PC == REG_PPC) \ CLK_ALL() /* Cause a Break interrupt */ #define OP_BRK(BCLK) \ CLK(BCLK); \ PUSH_16(REG_PC); \ PUSH_8(GET_REG_P()); \ FLAG_B |= FLAGPOS_B; \ FLAG_I = IFLAG_CLEAR; \ JUMP(read_16_VEC(VECTOR_BRK)) /* Call subroutine */ #define OP_CALL(BCLK) \ CLK(BCLK); \ DST = EA_ABS(); \ PUSH_16(REG_PC); \ JUMP(DST) /* Compare accumulator and branch if not equal */ #define OP_CBNE(BCLK, MODE) \ CLK(BCLK); \ SRC = OPER_8_##MODE(); \ DST = EA_IMM(); \ if(SRC != REG_A) \ { \ CLK(2); \ BRANCH(read_8_IMM(DST)); \ } /* Clear Carry flag */ #define OP_CLRC(BCLK) \ CLK(BCLK); \ FLAG_C = CFLAG_CLEAR /* Clear Memory Bit */ #define OP_CLR(BCLK, BIT) \ CLK(BCLK); \ DST = EA_DP(); \ SRC = read_8_DP(DST) & ~BIT; \ write_8_DP(DST, SRC) /* Clear Overflow flag (also clears half-carry) */ #define OP_CLRV(BCLK) \ CLK(BCLK); \ FLAG_V = VFLAG_CLEAR; \ FLAG_H = 0; /* Clear the Page flag */ #define OP_CLRP(BCLK) \ CLK(BCLK); \ FLAG_P = PFLAG_CLEAR /* Compare operand to register */ #define OP_CMPR(BCLK, REG, MODE) \ CLK(BCLK); \ SRC = OPER_8_##MODE(); \ m_spc_int16 = (short)REG - (short)SRC; \ FLAG_C = (m_spc_int16 >= 0) ? 
CFLAG_SET : 0; \ FLAG_NZ = MAKE_UINT_8(m_spc_int16); /* Compare memory */ #define OP_CMPM(BCLK, SMODE, DMODE) \ CLK(BCLK); \ SRC = OPER_8_##SMODE(); \ m_spc_int16 = (short)OPER_8_##DMODE() - (short)SRC; \ FLAG_C = (m_spc_int16 >= 0) ? CFLAG_SET : 0; \ FLAG_NZ = MAKE_UINT_8(m_spc_int16); /* Compare word */ #define OP_CMPW(BCLK, MODE) \ CLK(BCLK); \ SRC = OPER_16_##MODE(); \ m_spc_int32 = (int)GET_REG_YA() - (int)SRC; \ FLAG_C = (m_spc_int32 >= 0) ? CFLAG_SET : 0; \ FLAG_NZ = NZFLAG_16(m_spc_int32); /* Decimal adjust for addition */ #define OP_DAA(BCLK) \ CLK(BCLK); \ SRC = REG_A; \ if (((SRC & 0x0f) > 9) || (FLAG_H & HFLAG_SET)) \ { \ REG_A += 6; \ if (REG_A < 6) \ { \ FLAG_C = CFLAG_SET; \ } \ } \ if ((SRC > 0x99) || (FLAG_C & CFLAG_SET)) \ { \ REG_A += 0x60; \ FLAG_C = CFLAG_SET; \ } \ FLAG_NZ = REG_A = MAKE_UINT_8(REG_A); /* Decimal adjust for subtraction */ #define OP_DAS(BCLK) \ CLK(BCLK); \ SRC = REG_A; \ if (!(FLAG_H & HFLAG_SET) || ((SRC & 0xf) > 9)) \ { \ REG_A -= 6; \ } \ if (!(FLAG_C & CFLAG_SET) || (SRC > 0x99)) \ { \ REG_A -= 0x60; \ FLAG_C = 0; \ } \ FLAG_NZ = REG_A = MAKE_UINT_8(REG_A) /* Decrement register and branch if not zero */ /* speed up busy loops */ #define OP_DBNZR(BCLK) \ CLK(BCLK); \ REG_Y = MAKE_UINT_8(REG_Y - 1); \ DST = EA_IMM(); \ if(REG_Y != 0) \ { \ CLK(2); \ BRANCH(read_8_IMM(DST)); \ } /* Decrement operand and branch if not zero */ /* Speed up busy loops but do reads/writes for compatibility */ #define OP_DBNZM(BCLK) \ CLK(BCLK); \ DST = EA_DP(); \ SRC = MAKE_UINT_8(read_8_DP(DST) - 1); \ write_8_DP(DST, SRC); \ DST = EA_IMM(); \ if(SRC != 0) \ { \ CLK(2); \ BRANCH(read_8_IMM(DST)); \ } /* Decrement register */ #define OP_DECR(BCLK, REG) \ CLK(BCLK); \ FLAG_NZ = REG = MAKE_UINT_8(REG - 1) /* Decrement operand */ #define OP_DECM(BCLK, MODE) \ CLK(BCLK); \ DST = EA_##MODE(); \ FLAG_NZ = MAKE_UINT_8(read_8_##MODE(DST) - 1); \ write_8_##MODE(DST, FLAG_NZ) /* Decrement word */ #define OP_DECW(BCLK) \ CLK(BCLK); \ DST = EA_DP(); \ 
FLAG_NZ = MAKE_UINT_16(read_16_DP(DST) - 1); \ write_16_DP(DST, FLAG_Z); \ FLAG_NZ = NZFLAG_16(FLAG_Z) /* Disable interrupts */ #define OP_DI(BCLK) \ CLK(BCLK); \ FLAG_I = IFLAG_CLEAR /* Divide - should be almost exactly how the hardware works */ #define OP_DIV(BCLK) \ CLK(BCLK); \ TMP1 = SRC = GET_REG_YA(); \ TMP2 = (REG_X << 9); \ FLAG_H = 0; \ if ((REG_Y & 0xf) >= (REG_X & 0xf)) FLAG_H = HFLAG_SET; \ for (TMP3 = 0; TMP3 < 9; TMP3++) \ { \ TMP1 <<= 1; \ if (TMP1 & 0x20000) TMP1 = (TMP1 & 0x1ffff) | 1; \ if (TMP1 >= TMP2) TMP1 ^= 1; \ if (TMP1 & 1) TMP1 = ((TMP1 - TMP2) & 0x1ffff); \ } \ FLAG_V = (TMP1 & 0x100) ? VFLAG_SET : 0; \ SET_REG_YA((((TMP1 >> 9) & 0xff) << 8) + (TMP1 & 0xff)); \ FLAG_NZ = MAKE_UINT_8(GET_REG_YA()); /* Enable interrupts */ #define OP_EI(BCLK) \ CLK(BCLK); \ FLAG_I = IFLAG_SET /* Exclusive Or operand to accumulator */ #define OP_EOR(BCLK, MODE) \ CLK(BCLK); \ FLAG_NZ = REG_A ^= OPER_8_##MODE() /* Logical EOR operand */ #define OP_EORM(BCLK, SMODE, DMODE) \ CLK(BCLK); \ FLAG_NZ = OPER_8_##SMODE(); \ DST = EA_##DMODE(); \ FLAG_NZ ^= read_8_##DMODE(DST); \ write_8_##DMODE(DST, FLAG_NZ) /* Exclusive OR bit to C */ #define OP_EOR1(BCLK) \ CLK(BCLK); \ DST = OPER_16_IMM(); \ SRC = 1 << (DST >> 13); \ DST &= 0x1fff; \ if(read_8_NORM(DST) & SRC) \ FLAG_C = ~FLAG_C /* Increment register */ #define OP_INCR(BCLK, REG) \ CLK(BCLK); \ FLAG_NZ = REG = MAKE_UINT_8(REG + 1) /* Increment operand */ #define OP_INCM(BCLK, MODE) \ CLK(BCLK); \ DST = EA_##MODE(); \ FLAG_NZ = MAKE_UINT_8(read_8_##MODE(DST) + 1); \ write_8_##MODE(DST, FLAG_NZ) /* Increment word */ #define OP_INCW(BCLK) \ CLK(BCLK); \ DST = EA_DP(); \ FLAG_NZ = MAKE_UINT_16(read_16_DP(DST) + 1); \ write_16_DP(DST, FLAG_Z); \ FLAG_NZ = NZFLAG_16(FLAG_Z) /* Jump */ /* If we're in a busy loop, eat all clock cycles */ #define OP_JMP(BCLK, MODE) \ CLK(BCLK); \ JUMP(EA_##MODE()); \ if(REG_PC == REG_PPC) \ CLK_ALL() /* Jump to Subroutine */ #define OP_JSR(BCLK, MODE) \ CLK(BCLK); \ PUSH_16(REG_PC); \ 
JUMP(EA_##MODE()) /* Logical Shift Right accumulator */ #define OP_LSR(BCLK) \ CLK(BCLK); \ FLAG_C = REG_A << 8; \ FLAG_NZ = REG_A >>= 1 /* Logical Shift Right operand */ #define OP_LSRM(BCLK, MODE) \ CLK(BCLK); \ DST = EA_##MODE(); \ FLAG_NZ = read_8_##MODE(DST); \ FLAG_C = FLAG_NZ << 8; \ FLAG_NZ >>= 1; \ write_8_##MODE(DST, FLAG_NZ) /* Move from register to register */ #define OP_MOVRR(BCLK, SREG, DREG) \ CLK(BCLK); \ FLAG_NZ = DREG = SREG /* Move from register to memory */ #define OP_MOVRM(BCLK, SREG, DMODE) \ CLK(BCLK); \ write_8_##DMODE(EA_##DMODE(), SREG) /* Move from memory to register */ #define OP_MOVMR(BCLK, SMODE, DREG) \ CLK(BCLK); \ FLAG_NZ = DREG = OPER_8_##SMODE() /* Move from memory to memory */ #define OP_MOVMM(BCLK, SMODE, DMODE) \ CLK(BCLK); \ SRC = OPER_8_##SMODE(); \ DST = EA_##DMODE(); \ write_8_##DMODE(DST, SRC) /* Move word register to memory */ #define OP_MOVWRM(BCLK) \ CLK(BCLK); \ write_16_DP(EA_DP(), GET_REG_YA()) /* Move word memory to register */ #define OP_MOVWMR(BCLK) \ CLK(BCLK); \ FLAG_NZ = OPER_16_DP(); \ SET_REG_YA(FLAG_Z); \ FLAG_NZ = NZFLAG_16(FLAG_Z) /* Move from Stack pointer to X */ #define OP_MOVSX(BCLK) \ CLK(BCLK); \ FLAG_NZ = REG_X = REG_S /* Move from X to Stack pointer */ #define OP_MOVXS(BCLK) \ CLK(BCLK); \ REG_S = REG_X /* Move bit from memory to C */ #define OP_MOV1C(BCLK) \ CLK(BCLK); \ DST = OPER_16_IMM(); \ SRC = 1 << (DST >> 13); \ DST &= 0x1fff; \ FLAG_C = ((read_8_NORM(DST) & SRC) != 0) << 8 /* Move bit from C to memory */ #define OP_MOV1M(BCLK) \ CLK(BCLK); \ DST = OPER_16_IMM(); \ SRC = 1 << (DST >> 13); \ DST &= 0x1fff; \ if(FLAG_C & CFLAG_SET) \ write_8_NORM(DST, read_8_NORM(DST) | SRC); \ else \ write_8_NORM(DST, read_8_NORM(DST) & ~SRC) /* Multiply A and Y and store result in YA */ #define OP_MUL(BCLK) \ CLK(BCLK); \ SRC = REG_Y * REG_A; \ REG_A = MAKE_UINT_8(SRC); \ FLAG_NZ = REG_Y = SRC >> 8; /* No Operation */ #define OP_NOP(BCLK) \ CLK(BCLK) /* Invert the C flag */ #define OP_NOTC(BCLK) \ 
CLK(BCLK); \ FLAG_C = ~FLAG_C /* NOT bit */ #define OP_NOT1(BCLK) \ CLK(BCLK); \ DST = OPER_16_IMM(); \ SRC = 1 << (DST >> 13); \ DST &= 0x1fff; \ write_8_NORM(DST, read_8_NORM(DST) ^ SRC) /* Logical OR operand to accumulator */ #define OP_OR(BCLK, MODE) \ CLK(BCLK); \ FLAG_NZ = REG_A |= OPER_8_##MODE() /* Logical OR operand */ #define OP_ORM(BCLK, SMODE, DMODE) \ CLK(BCLK); \ FLAG_NZ = OPER_8_##SMODE(); \ DST = EA_##DMODE(); \ FLAG_NZ |= read_8_##DMODE(DST); \ write_8_##DMODE(DST, FLAG_NZ) /* Logical OR bit to C */ #define OP_OR1(BCLK) \ CLK(BCLK); \ DST = EA_IMM16(); \ if(!(FLAG_C & CFLAG_SET)) \ { \ DST = read_16_IMM(DST); \ SRC = 1 << (DST >> 13); \ DST &= 0x1fff; \ if(read_8_NORM(DST) & SRC) \ FLAG_C = CFLAG_SET; \ } /* OR negated bit to C */ #define OP_ORN1(BCLK) \ CLK(BCLK); \ DST = EA_IMM16(); \ if(!(FLAG_C & CFLAG_SET)) \ { \ DST = read_16_IMM(DST); \ SRC = 1 << (DST >> 13); \ DST &= 0x1fff; \ if(!(read_8_NORM(DST) & SRC)) \ FLAG_C = CFLAG_SET; \ } /* UPage Call */ #define OP_PCALL(BCLK) \ CLK(BCLK); \ DST = EA_DP(); \ PUSH_16(REG_PC); \ JUMP(0xff00 | DST) /* Push a register to the stack */ #define OP_PUSH(BCLK, REG) \ CLK(BCLK); \ PUSH_8(REG) /* Push the Processor Status Register to the stack */ #define OP_PHP(BCLK) \ CLK(BCLK); \ PUSH_8(GET_REG_P()) /* Pull a register from the stack */ #define OP_PULL(BCLK, REG) \ CLK(BCLK); \ REG = PULL_8() /* Pull the Processor Status Register from the stack */ #define OP_PLP(BCLK) \ CLK(BCLK); \ SET_REG_P(PULL_8()) /* Return from Subroutine */ #define OP_RET(BCLK) \ CLK(BCLK); \ JUMP(PULL_16()) /* Return from Interrupt */ #define OP_RETI(BCLK) \ CLK(BCLK); \ SET_REG_P(PULL_8()); \ JUMP(PULL_16()) /* Rotate Left the accumulator */ #define OP_ROL(BCLK) \ CLK(BCLK); \ FLAG_C = (REG_A<<1) | CFLAG_AS_1(); \ FLAG_NZ = REG_A = MAKE_UINT_8(FLAG_C) /* Rotate Left an operand */ #define OP_ROLM(BCLK, MODE) \ CLK(BCLK); \ DST = EA_##MODE(); \ FLAG_C = (read_8_##MODE(DST)<<1) | CFLAG_AS_1(); \ FLAG_NZ = MAKE_UINT_8(FLAG_C); \ 
write_8_##MODE(DST, FLAG_NZ) /* Rotate Right the accumulator */ #define OP_ROR(BCLK) \ CLK(BCLK); \ REG_A |= FLAG_C & 0x100; \ FLAG_C = REG_A << 8; \ FLAG_NZ = REG_A >>= 1 /* Rotate Right an operand */ #define OP_RORM(BCLK, MODE) \ CLK(BCLK); \ DST = EA_##MODE(); \ FLAG_NZ = read_8_##MODE(DST) | (FLAG_C & 0x100); \ FLAG_C = FLAG_NZ << 8; \ FLAG_NZ >>= 1; \ write_8_##MODE(DST, FLAG_NZ) /* Subtract with Carry */ #define OP_SBC(BCLK, MODE) \ CLK(BCLK); \ SRC = OPER_8_##MODE(); \ TMP2 = REG_A - SRC - (CFLAG_AS_1() ^ 1); \ SUBOP_ADC(REG_A, ~SRC); \ FLAG_C = (TMP2 <= 0xff) ? CFLAG_SET : 0; \ REG_A = (UINT8)m_spc_int16; /* Subtract With Carry to memory */ #define OP_SBCM(BCLK, SMODE, DMODE) \ CLK(BCLK); \ SRC = OPER_8_##SMODE(); \ DST = EA_##DMODE(); \ TMP3 = read_8_##DMODE(DST); \ TMP2 = TMP3 - SRC - (CFLAG_AS_1() ^ 1); \ SUBOP_ADC(~SRC, TMP3); \ FLAG_C = (TMP2 <= 0xff) ? CFLAG_SET : 0; \ write_8_##DMODE(DST, (UINT8)m_spc_int16) /* Set Carry flag */ #define OP_SETC(BCLK) \ CLK(BCLK); \ FLAG_C = CFLAG_SET /* Set Page flag */ #define OP_SETP(BCLK) \ CLK(BCLK); \ FLAG_P = PFLAG_SET /* Set Memory Bit */ #define OP_SET(BCLK, BIT) \ CLK(BCLK); \ DST = EA_DP(); \ SRC = read_8_DP(DST) | BIT; \ write_8_DP(DST, SRC) /* Put the CPU to sleep */ #define OP_SLEEP(BCLK) \ CLK(BCLK); \ CPU_STOPPED |= STOP_LEVEL_SLEEP; \ CLK_ALL() /* Stop the CPU */ #define OP_STOP(BCLK) \ CLK(BCLK); \ CPU_STOPPED |= STOP_LEVEL_STOP; \ CLK_ALL() /* Subtract word */ #define OP_SUBW(BCLK) \ CLK(BCLK); \ SRC = OPER_16_DP(); \ DST = GET_REG_YA(); \ TMP1 = ((DST) & 0xff) - ((SRC) & 0xff); \ TMP2 = (TMP1 > 0xff) ? 1 : 0; \ TMP3 = ((DST) >> 8) - ((SRC) >> 8) - TMP2; \ m_spc_int16 = ((TMP1 & 0xff) + (TMP3 << 8)) & 0xffff; \ FLAG_C = (TMP3 <= 0xff) ? CFLAG_SET : 0; \ FLAG_H = ((unsigned) ((((DST) >> 8) & 0x0F) - \ (((SRC) >> 8) & 0x0F) - TMP2)) > 0x0F ? 0: HFLAG_SET; \ FLAG_V = (((DST) ^ (SRC)) & ((DST) ^ (UINT16) m_spc_int16) & 0x8000) ? 
VFLAG_SET : 0; \ FLAG_Z = (m_spc_int16 != 0); \ FLAG_N = (m_spc_int16>>8); \ SET_REG_YA(m_spc_int16); /* Table Call */ #define OP_TCALL(BCLK, NUM) \ CLK(BCLK); \ PUSH_16(REG_PC); \ JUMP(read_16_NORM(0xffc0 + ((15-NUM)<<1))) /* Test and Clear Bits */ #define OP_TCLR1(BCLK, MODE) \ CLK(BCLK); \ DST = EA_##MODE(); \ FLAG_NZ = read_8_##MODE(DST); \ write_8_##MODE(DST, FLAG_NZ & ~REG_A); \ FLAG_NZ &= REG_A /* Test and Set Bits */ #define OP_TSET1(BCLK, MODE) \ CLK(BCLK); \ DST = EA_##MODE(); \ FLAG_NZ = read_8_##MODE(DST); \ write_8_##MODE(DST, FLAG_NZ | REG_A); \ FLAG_NZ &= REG_A /* Exchange high and low nybbles of accumulator */ #define OP_XCN(BCLK) \ CLK(BCLK); \ FLAG_NZ = REG_A = MAKE_UINT_8((REG_A<<4) | (REG_A>>4)) #define OP_ILLEGAL(BCLK) \ CLK(BCLK) /* ======================================================================== */ /* ================================= API ================================== */ /* ======================================================================== */ void spc700_device::device_start() { m_program = &space(AS_PROGRAM); save_item(NAME(m_a)); save_item(NAME(m_x)); save_item(NAME(m_y)); save_item(NAME(m_s)); save_item(NAME(m_pc)); save_item(NAME(m_ppc)); save_item(NAME(m_flag_n)); save_item(NAME(m_flag_z)); save_item(NAME(m_flag_v)); save_item(NAME(m_flag_p)); save_item(NAME(m_flag_b)); save_item(NAME(m_flag_h)); save_item(NAME(m_flag_i)); save_item(NAME(m_flag_c)); save_item(NAME(m_line_irq)); save_item(NAME(m_line_nmi)); save_item(NAME(m_line_rst)); save_item(NAME(m_ir)); save_item(NAME(m_stopped)); save_item(NAME(m_ICount)); save_item(NAME(m_source)); save_item(NAME(m_destination)); save_item(NAME(m_temp1)); save_item(NAME(m_temp2)); save_item(NAME(m_temp3)); save_item(NAME(m_spc_int16)); save_item(NAME(m_spc_int32)); // Register state for debugger state_add( SPC700_PC, "PC", m_pc ).formatstr("%04X"); state_add( SPC700_S, "S", m_s ).formatstr("%02X"); state_add( SPC700_P, "P", m_debugger_temp 
).callimport().callexport().formatstr("%02X"); state_add( SPC700_A, "A", m_a ).formatstr("%02X"); state_add( SPC700_X, "X", m_x ).formatstr("%02X"); state_add( SPC700_Y, "Y", m_y ).formatstr("%02X"); state_add(STATE_GENPC, "curpc", m_pc).formatstr("%04X").noshow(); state_add(STATE_GENSP, "GENSP", m_debugger_temp).mask(0x1ff).callexport().formatstr("%04X").noshow(); state_add(STATE_GENFLAGS, "GENFLAGS", m_debugger_temp).formatstr("%8s").noshow(); state_add(STATE_GENPCBASE, "GENPCBASE", m_ppc).formatstr("%04X").noshow(); m_icountptr = &m_ICount; } void spc700_device::state_string_export(const device_state_entry &entry, std::string &str) const { switch (entry.index()) { case STATE_GENFLAGS: str = string_format("%c%c%c%c%c%c%c%c", (m_flag_n & 0x80) ? 'N':'.', ((m_flag_v & 0x80) >> 1) ? 'V':'.', (m_flag_p>>3) ? 'P':'.', (m_flag_b) ? 'B':'.', (m_flag_h & HFLAG_SET) ? 'H':'.', ( m_flag_i) ? 'I':'.', ((!m_flag_z) << 1) ? 'Z':'.', ((m_flag_c >> 8)&1) ? 'C':'.' ); break; } } void spc700_device::state_import(const device_state_entry &entry) { switch (entry.index()) { case SPC700_P: SET_REG_P(m_debugger_temp); break; } } void spc700_device::state_export(const device_state_entry &entry) { switch (entry.index()) { case SPC700_P: m_debugger_temp = ((m_flag_n & 0x80) | ((m_flag_v & 0x80) >> 1) | m_flag_p>>3 | m_flag_b | (m_flag_h & HFLAG_SET) | m_flag_i | ((!m_flag_z) << 1) | ((m_flag_c >> 8)&1)); break; case STATE_GENSP: m_debugger_temp = m_s + STACK_PAGE; break; } } void spc700_device::device_reset() { CPU_STOPPED = 0; LINE_IRQ = 0; LINE_NMI = 0; REG_S = 0; FLAG_NZ = NZFLAG_CLEAR; FLAG_V = VFLAG_CLEAR; FLAG_P = PFLAG_CLEAR; FLAG_B = BFLAG_CLEAR; FLAG_H = HFLAG_CLEAR; FLAG_I = IFLAG_CLEAR; FLAG_C = CFLAG_CLEAR; JUMP(read_16_VEC(VECTOR_RST)); } void spc700_device::execute_set_input( int inptnum, int state ) { if ( inptnum == INPUT_LINE_NMI ) { /* Assert or clear the NMI line of the CPU */ #if !SPC700_OPTIMIZE_SNES if(state == CLEAR_LINE) LINE_NMI = 0; else if(!LINE_NMI) { LINE_NMI 
= 1; CLK(7); PUSH_16(REG_PC); PUSH_8(GET_REG_P()); JUMP(read_16_VEC(VECTOR_NMI)); } #endif /* SPC700_OPTIMIZE_SNES */ } else { /* Assert or clear the IRQ line of the CPU */ #if !SPC700_OPTIMIZE_SNES LINE_IRQ = (state != CLEAR_LINE) ? IRQ_SET : IRQ_CLEAR; CHECK_IRQ(); #endif /* SPC700_OPTIMIZE_SNES */ } } #include "spc700ds.h" offs_t spc700_device::disasm_disassemble(char *buffer, offs_t pc, const UINT8 *oprom, const UINT8 *opram, UINT32 options) { return CPU_DISASSEMBLE_NAME(spc700)(this, buffer, pc, oprom, opram, options); } //int dump_flag = 0; /* Execute instructions for <clocks> cycles */ void spc700_device::execute_run() { if (CPU_STOPPED) { CLOCKS = 0; return; } while(CLOCKS > 0) { REG_PPC = REG_PC; debugger_instruction_hook(this, REG_PC); REG_PC++; switch(REG_IR = read_8_immediate(REG_PPC)) { case 0x00: OP_NOP ( 2 ); break; /* NOP */ case 0x01: OP_TCALL ( 8, 0 ); break; /* TCALL 0 */ case 0x02: OP_SET ( 4, BIT_0 ); break; /* SET 0 */ case 0x03: OP_BBS ( 5, BIT_0 ); break; /* BBS 0 */ case 0x04: OP_OR ( 3, DP ); break; /* ORA dp */ case 0x05: OP_OR ( 4, ABS ); break; /* ORA abs */ case 0x06: OP_OR ( 3, XI ); break; /* ORA xi */ case 0x07: OP_OR ( 6, DXI ); break; /* ORA dxi */ case 0x08: OP_OR ( 2, IMM ); break; /* ORA imm */ case 0x09: OP_ORM ( 6, DP , DP ); break; /* ORM dp dp */ case 0x0a: OP_OR1 ( 5 ); break; /* OR1 bit */ case 0x0b: OP_ASLM ( 4, DP ); break; /* ASL dp */ case 0x0c: OP_ASLM ( 5, ABS ); break; /* ASL abs */ case 0x0d: OP_PHP ( 4 ); break; /* PHP */ case 0x0e: OP_TSET1 ( 6, ABS ); break; /* TSET1 abs */ case 0x0f: OP_BRK ( 8 ); break; /* BRK */ case 0x10: OP_BCC ( 2, COND_PL() ); break; /* BPL */ case 0x11: OP_TCALL ( 8, 1 ); break; /* TCALL 1 */ case 0x12: OP_CLR ( 4, BIT_0 ); break; /* CLR 0 */ case 0x13: OP_BBC ( 5, BIT_0 ); break; /* BBC 0 */ case 0x14: OP_OR ( 4, DPX ); break; /* ORA dpx */ case 0x15: OP_OR ( 5, ABX ); break; /* ORA abx */ case 0x16: OP_OR ( 5, ABY ); break; /* ORA aby */ case 0x17: OP_OR ( 6, DIY ); break; /* ORA diy 
*/ case 0x18: OP_ORM ( 5, IMM, DP ); break; /* ORM dp, imm */ case 0x19: OP_ORM ( 5, YI, XI ); break; /* ORM xi, yi */ case 0x1a: OP_DECW ( 6 ); break; /* DECW di */ case 0x1b: OP_ASLM ( 5, DPX ); break; /* ASL dpx */ case 0x1c: OP_ASL ( 2 ); break; /* ASL a */ case 0x1d: OP_DECR ( 2, REG_X ); break; /* DEC x */ case 0x1e: OP_CMPR ( 4, REG_X, ABS ); break; /* CMP x, abs */ case 0x1f: OP_JMP ( 6, AXI ); break; /* JMP axi */ case 0x20: OP_CLRP ( 2 ); break; /* CLRP */ case 0x21: OP_TCALL ( 8, 2 ); break; /* TCALL 2 */ case 0x22: OP_SET ( 4, BIT_1 ); break; /* SET 1 */ case 0x23: OP_BBS ( 5, BIT_1 ); break; /* BBS 1 */ case 0x24: OP_AND ( 3, DP ); break; /* AND dp */ case 0x25: OP_AND ( 4, ABS ); break; /* AND abs */ case 0x26: OP_AND ( 3, XI ); break; /* AND xi */ case 0x27: OP_AND ( 6, DXI ); break; /* AND dxi */ case 0x28: OP_AND ( 2, IMM ); break; /* AND imm */ case 0x29: OP_ANDM ( 6, DP , DP ); break; /* AND dp, dp */ case 0x2a: OP_ORN1 ( 5 ); break; /* OR1 !bit */ case 0x2b: OP_ROLM ( 4, DP ); break; /* ROL dp */ case 0x2c: OP_ROLM ( 5, ABS ); break; /* ROL abs */ case 0x2d: OP_PUSH ( 4, REG_A ); break; /* PUSH a */ case 0x2e: OP_CBNE ( 5, DP ); break; /* CBNE dp */ case 0x2f: OP_BRA ( 4 ); break; /* BRA */ case 0x30: OP_BCC ( 2, COND_MI() ); break; /* BMI */ case 0x31: OP_TCALL ( 8, 3 ); break; /* TCALL 3 */ case 0x32: OP_CLR ( 4, BIT_1 ); break; /* CLR 1 */ case 0x33: OP_BBC ( 5, BIT_1 ); break; /* BBC 1 */ case 0x34: OP_AND ( 4, DPX ); break; /* AND dpx */ case 0x35: OP_AND ( 5, ABX ); break; /* AND abx */ case 0x36: OP_AND ( 5, ABY ); break; /* AND aby */ case 0x37: OP_AND ( 6, DIY ); break; /* AND diy */ case 0x38: OP_ANDM ( 5, IMM, DP ); break; /* AND dp, imm */ case 0x39: OP_ANDM ( 5, YI , XI ); break; /* AND xi, yi */ case 0x3a: OP_INCW ( 6 ); break; /* INCW di */ case 0x3b: OP_ROLM ( 5, DPX ); break; /* ROL dpx */ case 0x3c: OP_ROL ( 2 ); break; /* ROL acc */ case 0x3d: OP_INCR ( 2, REG_X ); break; /* INC x */ case 0x3e: OP_CMPR ( 3, REG_X, DP ); break; 
/* CMP x, dp */ case 0x3f: OP_CALL ( 8 ); break; /* CALL abs */ case 0x40: OP_SETP ( 2 ); break; /* RTI */ case 0x41: OP_TCALL ( 8, 4 ); break; /* TCALL 4 */ case 0x42: OP_SET ( 4, BIT_2 ); break; /* SET 2 */ case 0x43: OP_BBS ( 5, BIT_2 ); break; /* BBS 2 */ case 0x44: OP_EOR ( 3, DP ); break; /* EOR dp */ case 0x45: OP_EOR ( 4, ABS ); break; /* EOR abs */ case 0x46: OP_EOR ( 3, XI ); break; /* EOR xi */ case 0x47: OP_EOR ( 6, DXI ); break; /* EOR dxi */ case 0x48: OP_EOR ( 2, IMM ); break; /* EOR imm */ case 0x49: OP_EORM ( 6, DP, DP ); break; /* EOR dp, dp */ case 0x4a: OP_AND1 ( 4 ); break; /* AND1 bit */ case 0x4b: OP_LSRM ( 4, DP ); break; /* LSR dp */ case 0x4c: OP_LSRM ( 5, ABS ); break; /* LSR abs */ case 0x4d: OP_PUSH ( 4, REG_X ); break; /* PUSH x */ case 0x4e: OP_TCLR1 ( 6, ABS ); break; /* TCLR1 abs */ case 0x4f: OP_PCALL ( 6 ); break; /* PCALL */ case 0x50: OP_BCC ( 2, COND_VC() ); break; /* BVC */ case 0x51: OP_TCALL ( 8, 5 ); break; /* TCALL 5 */ case 0x52: OP_CLR ( 4, BIT_2 ); break; /* CLR 2 */ case 0x53: OP_BBC ( 5, BIT_2 ); break; /* BBC 2 */ case 0x54: OP_EOR ( 4, DPX ); break; /* EOR dpx */ case 0x55: OP_EOR ( 5, ABX ); break; /* EOR abx */ case 0x56: OP_EOR ( 5, ABY ); break; /* EOR aby */ case 0x57: OP_EOR ( 6, DIY ); break; /* EOR diy */ case 0x58: OP_EORM ( 5, IMM, DP ); break; /* EOR dp, imm */ case 0x59: OP_EORM ( 5, YI , XI ); break; /* EOR xi, yi */ case 0x5a: OP_CMPW ( 4, DP ); break; /* CMPW dp */ case 0x5b: OP_LSRM ( 5, DPX ); break; /* LSR dpx */ case 0x5c: OP_LSR ( 2 ); break; /* LSR */ case 0x5d: OP_MOVRR ( 2, REG_A, REG_X ); break; /* MOV X, A */ case 0x5e: OP_CMPR ( 4, REG_Y, ABS ); break; /* CMP Y, abs */ case 0x5f: OP_JMP ( 3, ABS ); break; /* JMP abs */ case 0x60: OP_CLRC ( 2 ); break; /* CLRC */ case 0x61: OP_TCALL ( 8, 6 ); break; /* TCALL 6 */ case 0x62: OP_SET ( 4, BIT_3 ); break; /* SET 3 */ case 0x63: OP_BBS ( 5, BIT_3 ); break; /* BBS 3 */ case 0x64: OP_CMPR ( 3, REG_A, DP ); break; /* CMP A, dp */ case 0x65: OP_CMPR 
( 4, REG_A, ABS ); break; /* CMP A, abs */ case 0x66: OP_CMPR ( 3, REG_A, XI ); break; /* CMP A, xi */ case 0x67: OP_CMPR ( 6, REG_A, DXI ); break; /* CMP A, dxi */ case 0x68: OP_CMPR ( 2, REG_A, IMM ); break; /* CMP A, imm */ case 0x69: OP_CMPM ( 6, DP, DP ); break; /* CMP dp, dp */ case 0x6a: OP_ANDN1 ( 4 ); break; /* AND1 !bit */ case 0x6b: OP_RORM ( 4, DP ); break; /* ROR dp */ case 0x6c: OP_RORM ( 5, ABS ); break; /* ROR abs */ case 0x6d: OP_PUSH ( 4, REG_Y ); break; /* PUSH Y */ case 0x6e: OP_DBNZM ( 5 ); break; /* DBNZ dp */ case 0x6f: OP_RET ( 5 ); break; /* RET */ case 0x70: OP_BCC ( 2, COND_VS() ); break; /* BVS */ case 0x71: OP_TCALL ( 8, 7 ); break; /* TCALL 7 */ case 0x72: OP_CLR ( 4, BIT_3 ); break; /* CLR 3 */ case 0x73: OP_BBC ( 5, BIT_3 ); break; /* BBC 3 */ case 0x74: OP_CMPR ( 4, REG_A, DPX ); break; /* CMP A, dpx */ case 0x75: OP_CMPR ( 5, REG_A, ABX ); break; /* CMP A, abx */ case 0x76: OP_CMPR ( 5, REG_A, ABY ); break; /* CMP A, aby */ case 0x77: OP_CMPR ( 6, REG_A, DIY ); break; /* CMP A, diy */ case 0x78: OP_CMPM ( 5, IMM, DP ); break; /* CMP dp, imm */ case 0x79: OP_CMPM ( 5, YI, XI ); break; /* CMP xi, yi */ case 0x7a: OP_ADDW ( 5 ); break; /* ADDW di */ case 0x7b: OP_RORM ( 5, DPX ); break; /* ROR dpx */ case 0x7c: OP_ROR ( 2 ); break; /* ROR A */ case 0x7d: OP_MOVRR ( 2, REG_X, REG_A ); break; /* MOV A, X */ case 0x7e: OP_CMPR ( 3, REG_Y, DP ); break; /* CMP Y, dp */ case 0x7f: OP_RETI ( 6 ); break; /* RETI */ case 0x80: OP_SETC ( 2 ); break; /* SETC */ case 0x81: OP_TCALL ( 8, 8 ); break; /* TCALL 8 */ case 0x82: OP_SET ( 4, BIT_4 ); break; /* SET 4 */ case 0x83: OP_BBS ( 5, BIT_4 ); break; /* BBS 4 */ case 0x84: OP_ADC ( 3, DP ); break; /* ADC dp */ case 0x85: OP_ADC ( 4, ABS ); break; /* ADC abs */ case 0x86: OP_ADC ( 3, XI ); break; /* ADC xi */ case 0x87: OP_ADC ( 6, DXI ); break; /* ADC dxi */ case 0x88: OP_ADC ( 2, IMM ); break; /* ADC imm */ case 0x89: OP_ADCM ( 6, DP, DP ); break; /* ADC dp, dp */ case 0x8a: OP_EOR1 ( 5 ); 
break; /* EOR1 bit */ case 0x8b: OP_DECM ( 4, DP ); break; /* DEC dp */ case 0x8c: OP_DECM ( 5, ABS ); break; /* DEC abs */ case 0x8d: OP_MOVMR ( 2, IMM, REG_Y ); break; /* MOV Y, imm */ case 0x8e: OP_PLP ( 4 ); break; /* POP PSW */ case 0x8f: OP_MOVMM ( 5, IMM, DP ); break; /* MOV dp, imm */ case 0x90: OP_BCC ( 2, COND_CC() ); break; /* BCC */ case 0x91: OP_TCALL ( 8, 9 ); break; /* TCALL 9 */ case 0x92: OP_CLR ( 4, BIT_4 ); break; /* CLR 4 */ case 0x93: OP_BBC ( 5, BIT_4 ); break; /* BBC 4 */ case 0x94: OP_ADC ( 4, DPX ); break; /* ADC dpx */ case 0x95: OP_ADC ( 5, ABX ); break; /* ADC abx */ case 0x96: OP_ADC ( 5, ABY ); break; /* ADC aby */ case 0x97: OP_ADC ( 6, DIY ); break; /* ADC diy */ case 0x98: OP_ADCM ( 5, IMM, DP ); break; /* ADC dp, imm */ case 0x99: OP_ADCM ( 5, YI, XI ); break; /* ADC xi, yi */ case 0x9a: OP_SUBW ( 5 ); break; /* SUBW dp */ case 0x9b: OP_DECM ( 5, DPX ); break; /* DEC dpx */ case 0x9c: OP_DECR ( 2, REG_A ); break; /* DEC A */ case 0x9d: OP_MOVSX ( 2 ); break; /* MOV X, SP */ case 0x9e: OP_DIV (12 ); break; /* DIV YA, X */ case 0x9f: OP_XCN ( 5 ); break; /* XCN A */ case 0xa0: OP_EI ( 3 ); break; /* EI */ case 0xa1: OP_TCALL ( 8, 10 ); break; /* TCALL 10 */ case 0xa2: OP_SET ( 4, BIT_5 ); break; /* SET 5 */ case 0xa3: OP_BBS ( 5, BIT_5 ); break; /* BBS 5 */ case 0xa4: OP_SBC ( 3, DP ); break; /* SBC dp */ case 0xa5: OP_SBC ( 4, ABS ); break; /* SBC abs */ case 0xa6: OP_SBC ( 3, XI ); break; /* SBC xi */ case 0xa7: OP_SBC ( 6, DXI ); break; /* SBC dxi */ case 0xa8: OP_SBC ( 2, IMM ); break; /* SBC imm */ case 0xa9: OP_SBCM ( 6, DP, DP ); break; /* SBC dp, dp */ case 0xaa: OP_MOV1C ( 4 ); break; /* MOV1 bit->C */ case 0xab: OP_INCM ( 4, DP ); break; /* INC dp */ case 0xac: OP_INCM ( 5, ABS ); break; /* INC abs */ case 0xad: OP_CMPR ( 2, REG_Y, IMM ); break; /* CMP Y, imm */ case 0xae: OP_PULL ( 4, REG_A ); break; /* POP A */ case 0xaf: OP_MOVRM ( 4, REG_A, XII ); break; /* MOV xii, A */ case 0xb0: OP_BCC ( 2, COND_CS() ); break; /* BCS 
*/ case 0xb1: OP_TCALL ( 8, 11 ); break; /* TCALL 11 */ case 0xb2: OP_CLR ( 4, BIT_5 ); break; /* CLR 5 */ case 0xb3: OP_BBC ( 5, BIT_5 ); break; /* BBC 5 */ case 0xb4: OP_SBC ( 4, DPX ); break; /* SBC dpx */ case 0xb5: OP_SBC ( 5, ABX ); break; /* SBC abx */ case 0xb6: OP_SBC ( 5, ABY ); break; /* SBC aby */ case 0xb7: OP_SBC ( 6, DIY ); break; /* SBC diy */ case 0xb8: OP_SBCM ( 5, IMM, DP ); break; /* SBC dp, imm */ case 0xb9: OP_SBCM ( 5, YI, XI ); break; /* SBC xi, yi */ case 0xba: OP_MOVWMR( 5 ); break; /* MOVW YA, dp */ case 0xbb: OP_INCM ( 5, DPX ); break; /* INC dpx */ case 0xbc: OP_INCR ( 2, REG_A ); break; /* INC A */ case 0xbd: OP_MOVXS ( 2 ); break; /* MOV SP, X */ case 0xbe: OP_DAS ( 3 ); break; /* DAS A */ case 0xbf: OP_MOVMR ( 4, XII, REG_A ); break; /* MOV A, xii */ case 0xc0: OP_DI ( 3 ); break; /* DI */ case 0xc1: OP_TCALL ( 8, 12 ); break; /* TCALL 12 */ case 0xc2: OP_SET ( 4, BIT_6 ); break; /* SET 6 */ case 0xc3: OP_BBS ( 5, BIT_6 ); break; /* BBS 6 */ case 0xc4: OP_MOVRM ( 4, REG_A, DP ); break; /* MOV dp, A */ case 0xc5: OP_MOVRM ( 5, REG_A, ABS ); break; /* MOV abs, A */ case 0xc6: OP_MOVRM ( 4, REG_A, XI ); break; /* MOV xi, A */ case 0xc7: OP_MOVRM ( 7, REG_A, DXI ); break; /* MOV dxi, A */ case 0xc8: OP_CMPR ( 2, REG_X, IMM ); break; /* CMP X, imm */ case 0xc9: OP_MOVRM ( 5, REG_X, ABS ); break; /* MOV abs, X */ case 0xca: OP_MOV1M ( 6 ); break; /* MOV1 C->bit */ case 0xcb: OP_MOVRM ( 4, REG_Y, DP ); break; /* MOV dp, Y */ case 0xcc: OP_MOVRM ( 5, REG_Y, ABS ); break; /* MOV abs, Y */ case 0xcd: OP_MOVMR ( 2, IMM, REG_X ); break; /* MOV X, imm */ case 0xce: OP_PULL ( 4, REG_X ); break; /* POP X */ case 0xcf: OP_MUL ( 9 ); break; /* MUL YA */ case 0xd0: OP_BCC ( 2, COND_NE() ); break; /* BNE */ case 0xd1: OP_TCALL ( 8, 13 ); break; /* TCALL 13 */ case 0xd2: OP_CLR ( 4, BIT_6 ); break; /* CLR 6 */ case 0xd3: OP_BBC ( 5, BIT_6 ); break; /* BBC 6 */ case 0xd4: OP_MOVRM ( 5, REG_A, DPX ); break; /* MOV dpx, A */ case 0xd5: OP_MOVRM ( 6, REG_A, 
ABX ); break; /* MOV abx, A */ case 0xd6: OP_MOVRM ( 6, REG_A, ABY ); break; /* MOV aby, A */ case 0xd7: OP_MOVRM ( 7, REG_A, DIY ); break; /* MOV diy, A */ case 0xd8: OP_MOVRM ( 4, REG_X, DP ); break; /* MOV dp, X */ case 0xd9: OP_MOVRM ( 5, REG_X, DPY ); break; /* MOV dpy, X */ case 0xda: OP_MOVWRM( 5 ); break; /* MOVW dp, YA */ case 0xdb: OP_MOVRM ( 5, REG_Y, DPX ); break; /* MOV dpx, Y */ case 0xdc: OP_DECR ( 2, REG_Y ); break; /* DEC Y */ case 0xdd: OP_MOVRR ( 2, REG_Y, REG_A ); break; /* MOV A, Y */ case 0xde: OP_CBNE ( 6, DPX ); break; /* CBNE dpx */ case 0xdf: OP_DAA ( 3 ); break; /* DAA */ case 0xe0: OP_CLRV ( 2 ); break; /* CLRV */ case 0xe1: OP_TCALL ( 8, 14 ); break; /* TCALL 14 */ case 0xe2: OP_SET ( 4, BIT_7 ); break; /* SET 7 */ case 0xe3: OP_BBS ( 5, BIT_7 ); break; /* BBS 7 */ case 0xe4: OP_MOVMR ( 3, DP, REG_A ); break; /* MOV A, dp */ case 0xe5: OP_MOVMR ( 4, ABS, REG_A ); break; /* MOV A, abs */ case 0xe6: OP_MOVMR ( 3, XI, REG_A ); break; /* MOV A, xi */ case 0xe7: OP_MOVMR ( 6, DXI, REG_A ); break; /* MOV A, dxi */ case 0xe8: OP_MOVMR ( 2, IMM, REG_A ); break; /* CMP A, imm */ case 0xe9: OP_MOVMR ( 4, ABS, REG_X ); break; /* MOV X, abs */ case 0xea: OP_NOT1 ( 5 ); break; /* NOT1 */ case 0xeb: OP_MOVMR ( 3, DP, REG_Y ); break; /* MOV Y, dp */ case 0xec: OP_MOVMR ( 4, ABS, REG_Y ); break; /* MOV Y, abs */ case 0xed: OP_NOTC ( 3 ); break; /* NOTC */ case 0xee: OP_PULL ( 4, REG_Y ); break; /* POP Y */ case 0xef: OP_SLEEP ( 1 ); break; /* SLEEP */ case 0xf0: OP_BCC ( 2, COND_EQ() ); break; /* BEQ */ case 0xf1: OP_TCALL ( 8, 15 ); break; /* TCALL1 5 */ case 0xf2: OP_CLR ( 4, BIT_7 ); break; /* CLR 7 */ case 0xf3: OP_BBC ( 5, BIT_7 ); break; /* BBC 7 */ case 0xf4: OP_MOVMR ( 4, DPX, REG_A ); break; /* MOV A, dpx */ case 0xf5: OP_MOVMR ( 5, ABX, REG_A ); break; /* MOV A, abx */ case 0xf6: OP_MOVMR ( 5, ABY, REG_A ); break; /* MOV A, aby */ case 0xf7: OP_MOVMR ( 6, DIY, REG_A ); break; /* MOV A, diy */ case 0xf8: OP_MOVMR ( 3, DP, REG_X ); break; /* 
MOV X, dp */ case 0xf9: OP_MOVMR ( 4, DPY, REG_X ); break; /* MOV X, dpy */ case 0xfa: OP_MOVMM ( 5, DP, DP ); break; /* MOV dp, dp */ case 0xfb: OP_MOVMR ( 4, DPX, REG_Y ); break; /* MOV Y, DPX */ case 0xfc: OP_INCR ( 2, REG_Y ); break; /* INC Y */ case 0xfd: OP_MOVRR ( 2, REG_A, REG_Y ); break; /* MOV Y, A */ case 0xfe: OP_DBNZR ( 4 ); break; /* DBNZ Y */ case 0xff: OP_STOP ( 1 ); break; /* STOP */ } } } /* ======================================================================== */ /* ============================== END OF FILE ============================= */ /* ======================================================================== */
gpl-2.0
XMelancholy/kernel_snda_u8500
drivers/modem/shrm/shrm_protocol.c
7
46942
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Biju Das <biju.das@stericsson.com> for ST-Ericsson * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> for ST-Ericsson * Author: Arun Murthy <arun.murthy@stericsson.com> for ST-Ericsson * License terms: GNU General Public License (GPL) version 2 */ #include <linux/hrtimer.h> #include <linux/delay.h> #include <linux/netlink.h> #include <linux/kthread.h> #include <linux/modem/shrm/shrm.h> #include <linux/modem/shrm/shrm_driver.h> #include <linux/modem/shrm/shrm_private.h> #include <linux/modem/shrm/shrm_net.h> #include <linux/modem/modem_client.h> #include <linux/mfd/dbx500-prcmu.h> #include <linux/mfd/abx500.h> #include <mach/reboot_reasons.h> #include <mach/suspend.h> #include <mach/prcmu-debug.h> #include <mach/open_modem_shared_memory.h> #define CREATE_TRACE_POINTS #include "shrm-trace.h" #define L2_HEADER_ISI 0x0 #define L2_HEADER_RPC 0x1 #define L2_HEADER_AUDIO 0x2 #define L2_HEADER_SECURITY 0x3 #define L2_HEADER_COMMON_SIMPLE_LOOPBACK 0xC0 #define L2_HEADER_COMMON_ADVANCED_LOOPBACK 0xC1 #define L2_HEADER_AUDIO_SIMPLE_LOOPBACK 0x80 #define L2_HEADER_AUDIO_ADVANCED_LOOPBACK 0x81 #define L2_HEADER_CIQ 0xC3 #define L2_HEADER_RTC_CALIBRATION 0xC8 #define MAX_PAYLOAD 1024 #define MOD_STUCK_TIMEOUT 6 #define FIFO_FULL_TIMEOUT 1 #define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_COREPD_AWAKE BIT(0) #define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_AAPD_AWAKE BIT(1) #define PRCM_MOD_AWAKE_STATUS_PRCM_MOD_VMODEM_OFF_ISO BIT(2) #define PRCM_MOD_PURESET BIT(0) #define PRCM_MOD_SW_RESET BIT(1) #define PRCM_HOSTACCESS_REQ 0x334 #define PRCM_MOD_AWAKE_STATUS 0x4A0 #define PRCM_MOD_RESETN_VAL 0x204 static u8 boot_state = BOOT_INIT; static u8 recieve_common_msg[8*1024]; static u8 recieve_audio_msg[8*1024]; static received_msg_handler rx_common_handler; static received_msg_handler rx_audio_handler; static struct hrtimer timer; static struct hrtimer mod_stuck_timer_0; static struct hrtimer mod_stuck_timer_1; static struct hrtimer fifo_full_timer; 
struct sock *shrm_nl_sk; static char shrm_common_tx_state = SHRM_SLEEP_STATE; static char shrm_common_rx_state = SHRM_SLEEP_STATE; static char shrm_audio_tx_state = SHRM_SLEEP_STATE; static char shrm_audio_rx_state = SHRM_SLEEP_STATE; static atomic_t ac_sleep_disable_count = ATOMIC_INIT(0); static atomic_t ac_msg_pend_1 = ATOMIC_INIT(0); static atomic_t mod_stuck = ATOMIC_INIT(0); static atomic_t fifo_full = ATOMIC_INIT(0); static struct shrm_dev *shm_dev; /* Spin lock and tasklet declaration */ DECLARE_TASKLET(shm_ca_0_tasklet, shm_ca_msgpending_0_tasklet, 0); DECLARE_TASKLET(shm_ca_1_tasklet, shm_ca_msgpending_1_tasklet, 0); DECLARE_TASKLET(shm_ac_read_0_tasklet, shm_ac_read_notif_0_tasklet, 0); DECLARE_TASKLET(shm_ac_read_1_tasklet, shm_ac_read_notif_1_tasklet, 0); static DEFINE_MUTEX(ac_state_mutex); static DEFINE_SPINLOCK(ca_common_lock); static DEFINE_SPINLOCK(ca_audio_lock); static DEFINE_SPINLOCK(ca_wake_req_lock); static DEFINE_SPINLOCK(boot_lock); static DEFINE_SPINLOCK(mod_stuck_lock); static DEFINE_SPINLOCK(start_timer_lock); enum shrm_nl { SHRM_NL_MOD_RESET = 1, SHRM_NL_MOD_QUERY_STATE, SHRM_NL_USER_MOD_RESET, SHRM_NL_STATUS_MOD_ONLINE, SHRM_NL_STATUS_MOD_OFFLINE, }; static int check_modem_in_reset(struct shrm_dev *shrm); static inline int shm_chk_and_req_mod(struct shrm_dev *shrm) { int err = 0; if (!modem_is_requested(shrm->modem)) { err = clk_enable(shrm->clk); if (err) { dev_err(shrm->dev, "failed to enable sysclk\n"); shrm->clk_enabled = false; } else { shrm->clk_enabled = true; } } if (modem_request(shrm->modem) < 0) { dev_err(shrm->dev, "prcmu_ac_wake_req failed, initiating MSR\n"); queue_kthread_work(&shrm->shm_mod_stuck_kw, &shrm->shm_print_dbg_info); queue_kthread_work(&shrm->shm_mod_stuck_kw, &shrm->shm_mod_reset_req); err = -EFAULT; } return err; } static void shm_sysclk_disable_work(struct kthread_work *work) { struct shrm_dev *shrm = container_of(work, struct shrm_dev, shm_clk_disable_req); clk_disable(shrm->clk); shrm->clk_enabled = 
false; } void shm_print_dbg_info_work(struct kthread_work *work) { abx500_dump_all_banks(); prcmu_debug_dump_regs(); prcmu_debug_dump_data_mem(); } void shm_mod_reset_req_work(struct kthread_work *work) { unsigned long flags; struct shrm_dev *shrm = container_of(work, struct shrm_dev, shm_mod_reset_req); /* update the boot_state */ spin_lock_irqsave(&boot_lock, flags); if (boot_state != BOOT_DONE) { dev_info(shrm->dev, "Modem in reset state\n"); spin_unlock_irqrestore(&boot_lock, flags); return; } boot_state = BOOT_UNKNOWN; wmb(); spin_unlock_irqrestore(&boot_lock, flags); dev_err(shrm->dev, "APE makes modem reset\n"); prcmu_modem_reset(); } static void shm_ac_sleep_req_work(struct kthread_work *work) { struct shrm_dev *shrm = container_of(work, struct shrm_dev, shm_ac_sleep_req); mutex_lock(&ac_state_mutex); if (atomic_read(&ac_sleep_disable_count) == 0) modem_release(shrm->modem); mutex_unlock(&ac_state_mutex); } static void shm_ac_wake_req_work(struct kthread_work *work) { struct shrm_dev *shrm = container_of(work, struct shrm_dev, shm_ac_wake_req); mutex_lock(&ac_state_mutex); if (shm_chk_and_req_mod(shrm)) { mutex_unlock(&ac_state_mutex); return; } mutex_unlock(&ac_state_mutex); } static u32 get_host_accessport_val(void) { u32 prcm_hostaccess; u32 status; u32 reset_stats; status = (prcmu_read(PRCM_MOD_AWAKE_STATUS) & 0x03); reset_stats = (prcmu_read(PRCM_MOD_RESETN_VAL) & 0x03); prcm_hostaccess = prcmu_read(PRCM_HOSTACCESS_REQ); wmb(); prcm_hostaccess = ((prcm_hostaccess & 0x01) && (status == (PRCM_MOD_AWAKE_STATUS_PRCM_MOD_AAPD_AWAKE | PRCM_MOD_AWAKE_STATUS_PRCM_MOD_COREPD_AWAKE)) && (reset_stats == (PRCM_MOD_SW_RESET | PRCM_MOD_PURESET))); return prcm_hostaccess; } static enum hrtimer_restart shm_fifo_full_timeout(struct hrtimer *timer) { queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_mod_reset_req); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_print_dbg_info); return HRTIMER_NORESTART; } static enum hrtimer_restart 
shm_mod_stuck_timeout(struct hrtimer *timer) { unsigned long flags; spin_lock_irqsave(&mod_stuck_lock, flags); /* Check MSR is already in progress */ if (shm_dev->msr_flag || boot_state == BOOT_UNKNOWN || atomic_read(&mod_stuck) || atomic_read(&fifo_full)) { spin_unlock_irqrestore(&mod_stuck_lock, flags); return HRTIMER_NORESTART; } atomic_set(&mod_stuck, 1); spin_unlock_irqrestore(&mod_stuck_lock, flags); dev_err(shm_dev->dev, "No response from modem, timeout %dsec\n", MOD_STUCK_TIMEOUT); dev_err(shm_dev->dev, "APE initiating MSR\n"); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_mod_reset_req); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_print_dbg_info); return HRTIMER_NORESTART; } static enum hrtimer_restart callback(struct hrtimer *timer) { unsigned long flags; spin_lock_irqsave(&ca_wake_req_lock, flags); if (((shrm_common_rx_state == SHRM_IDLE) || (shrm_common_rx_state == SHRM_SLEEP_STATE)) && ((shrm_common_tx_state == SHRM_IDLE) || (shrm_common_tx_state == SHRM_SLEEP_STATE)) && ((shrm_audio_rx_state == SHRM_IDLE) || (shrm_audio_rx_state == SHRM_SLEEP_STATE)) && ((shrm_audio_tx_state == SHRM_IDLE) || (shrm_audio_tx_state == SHRM_SLEEP_STATE))) { shrm_common_rx_state = SHRM_SLEEP_STATE; shrm_audio_rx_state = SHRM_SLEEP_STATE; shrm_common_tx_state = SHRM_SLEEP_STATE; shrm_audio_tx_state = SHRM_SLEEP_STATE; queue_kthread_work(&shm_dev->shm_ac_sleep_kw, &shm_dev->shm_ac_sleep_req); } spin_unlock_irqrestore(&ca_wake_req_lock, flags); return HRTIMER_NORESTART; } int nl_send_multicast_message(int msg, gfp_t gfp_mask) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh = NULL; int err; /* prepare netlink message */ skb = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), gfp_mask); if (!skb) { dev_err(shm_dev->dev, "%s:alloc_skb failed\n", __func__); err = -ENOMEM; goto out; } nlh = (struct nlmsghdr *)skb->data; nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD); dev_dbg(shm_dev->dev, "nlh->nlmsg_len = %d\n", nlh->nlmsg_len); nlh->nlmsg_pid = 0; /* from kernel 
*/ nlh->nlmsg_flags = 0; *(int *)NLMSG_DATA(nlh) = msg; skb_put(skb, MAX_PAYLOAD); /* sender is in group 1<<0 */ NETLINK_CB(skb).pid = 0; /* from kernel */ /* to mcast group 1<<0 */ NETLINK_CB(skb).dst_group = 1; /*multicast the message to all listening processes*/ err = netlink_broadcast(shrm_nl_sk, skb, 0, 1, gfp_mask); dev_dbg(shm_dev->dev, "ret val from nl-multicast = %d\n", err); out: return err; } static void nl_send_unicast_message(int dst_pid) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh = NULL; int err; int bt_state; unsigned long flags; dev_dbg(shm_dev->dev, "Sending unicast message\n"); /* prepare the NL message for unicast */ skb = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), GFP_KERNEL); if (!skb) { dev_err(shm_dev->dev, "%s:alloc_skb failed\n", __func__); return; } nlh = (struct nlmsghdr *)skb->data; nlh->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD); dev_dbg(shm_dev->dev, "nlh->nlmsg_len = %d\n", nlh->nlmsg_len); nlh->nlmsg_pid = 0; /* from kernel */ nlh->nlmsg_flags = 0; spin_lock_irqsave(&boot_lock, flags); bt_state = boot_state; spin_unlock_irqrestore(&boot_lock, flags); if (bt_state == BOOT_DONE) *(int *)NLMSG_DATA(nlh) = SHRM_NL_STATUS_MOD_ONLINE; else *(int *)NLMSG_DATA(nlh) = SHRM_NL_STATUS_MOD_OFFLINE; skb_put(skb, MAX_PAYLOAD); /* sender is in group 1<<0 */ NETLINK_CB(skb).pid = 0; /* from kernel */ NETLINK_CB(skb).dst_group = 0; /*unicast the message to the querying processes*/ err = netlink_unicast(shrm_nl_sk, skb, dst_pid, MSG_DONTWAIT); dev_dbg(shm_dev->dev, "ret val from nl-unicast = %d\n", err); } static int check_modem_in_reset(struct shrm_dev *shrm) { u8 bt_state; unsigned long flags; spin_lock_irqsave(&boot_lock, flags); bt_state = boot_state; spin_unlock_irqrestore(&boot_lock, flags); #ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET if (bt_state != BOOT_UNKNOWN && (!readl(shrm->ca_reset_status_rptr))) return 0; else return -ENODEV; #else /* * this check won't be applicable and won't work correctly * if modem-silent-feature is not enabled * so, 
simply return 0 */ return 0; #endif } void shm_ca_msgpending_0_tasklet(unsigned long tasklet_data) { struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data; u32 reader_local_rptr; u32 reader_local_wptr; u32 shared_rptr; u32 config = 0, version = 0; unsigned long flags; dev_dbg(shrm->dev, "%s IN\n", __func__); /* * if sysclk is enabled then disable, as modem is awake it would * have requested for sysclk */ if (shrm->clk_enabled) queue_kthread_work(&shrm->shm_mod_stuck_kw, &shrm->shm_clk_disable_req); /* Interprocess locking */ spin_lock(&ca_common_lock); /* Update_reader_local_wptr with shared_wptr */ update_ca_common_local_wptr(shrm); get_reader_pointers(COMMON_CHANNEL, &reader_local_rptr, &reader_local_wptr, &shared_rptr); set_ca_msg_0_read_notif_send(0); if (boot_state == BOOT_DONE) { shrm_common_rx_state = SHRM_PTR_FREE; if (reader_local_rptr != shared_rptr) ca_msg_read_notification_0(shrm); if (reader_local_rptr != reader_local_wptr) receive_messages_common(shrm); get_reader_pointers(COMMON_CHANNEL, &reader_local_rptr, &reader_local_wptr, &shared_rptr); if (reader_local_rptr == reader_local_wptr) shrm_common_rx_state = SHRM_IDLE; } else { /* BOOT phase.only a BOOT_RESP should be in FIFO */ if (boot_state != BOOT_INFO_SYNC) { if (!read_boot_info_req(shrm, &config, &version)) { dev_err(shrm->dev, "Unable to read boot state\n"); return; } /* SendReadNotification */ ca_msg_read_notification_0(shrm); /* * Check the version number before * sending Boot info response */ /* send MsgPending notification */ write_boot_info_resp(shrm, config, version); spin_lock_irqsave(&boot_lock, flags); boot_state = BOOT_INFO_SYNC; spin_unlock_irqrestore(&boot_lock, flags); dev_info(shrm->dev, "BOOT_INFO_SYNC\n"); queue_kthread_work(&shrm->shm_common_ch_wr_kw, &shrm->send_ac_msg_pend_notify_0); } else { ca_msg_read_notification_0(shrm); dev_info(shrm->dev, "BOOT_INFO_SYNC\n"); } } /* Interprocess locking */ spin_unlock(&ca_common_lock); dev_dbg(shrm->dev, "%s OUT\n", __func__); } void 
shm_ca_msgpending_1_tasklet(unsigned long tasklet_data) { struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data; u32 reader_local_rptr; u32 reader_local_wptr; u32 shared_rptr; /* * This function is called when CaMsgPendingNotification Trigerred * by CMU. It means that CMU has wrote a message into Ca Audio FIFO */ dev_dbg(shrm->dev, "%s IN\n", __func__); /* * if sysclk is enabled then disable, as modem is awake it would have * reqested for sysclk */ if (shrm->clk_enabled) queue_kthread_work(&shrm->shm_mod_stuck_kw, &shrm->shm_clk_disable_req); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown\n", __func__); return; } /* Interprocess locking */ spin_lock(&ca_audio_lock); /* Update_reader_local_wptr(with shared_wptr) */ update_ca_audio_local_wptr(shrm); get_reader_pointers(AUDIO_CHANNEL, &reader_local_rptr, &reader_local_wptr, &shared_rptr); set_ca_msg_1_read_notif_send(0); if (boot_state != BOOT_DONE) { dev_err(shrm->dev, "Boot Error\n"); return; } shrm_audio_rx_state = SHRM_PTR_FREE; /* Check we already read the message */ if (reader_local_rptr != shared_rptr) ca_msg_read_notification_1(shrm); if (reader_local_rptr != reader_local_wptr) receive_messages_audio(shrm); get_reader_pointers(AUDIO_CHANNEL, &reader_local_rptr, &reader_local_wptr, &shared_rptr); if (reader_local_rptr == reader_local_wptr) shrm_audio_rx_state = SHRM_IDLE; /* Interprocess locking */ spin_unlock(&ca_audio_lock); dev_dbg(shrm->dev, "%s OUT\n", __func__); } void shm_ac_read_notif_0_tasklet(unsigned long tasklet_data) { struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data; u32 writer_local_rptr; u32 writer_local_wptr; u32 shared_wptr; unsigned long flags; dev_dbg(shrm->dev, "%s IN\n", __func__); /* Update writer_local_rptrwith shared_rptr */ update_ac_common_local_rptr(shrm); get_writer_pointers(COMMON_CHANNEL, &writer_local_rptr, &writer_local_wptr, &shared_wptr); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown\n", 
__func__); return; } if (boot_state == BOOT_INFO_SYNC) { /* BOOT_RESP sent by APE has been received by CMT */ spin_lock_irqsave(&boot_lock, flags); boot_state = BOOT_DONE; spin_unlock_irqrestore(&boot_lock, flags); dev_info(shrm->dev, "IPC_ISA BOOT_DONE\n"); if (shrm->msr_flag) { shrm_start_netdev(shrm->ndev); shrm->msr_flag = 0; /* multicast that modem is online */ nl_send_multicast_message(SHRM_NL_STATUS_MOD_ONLINE, GFP_ATOMIC); } } else if (boot_state == BOOT_DONE) { if (writer_local_rptr != writer_local_wptr) { shrm_common_tx_state = SHRM_PTR_FREE; queue_kthread_work(&shrm->shm_common_ch_wr_kw, &shrm->send_ac_msg_pend_notify_0); } else { shrm_common_tx_state = SHRM_IDLE; shrm_restart_netdev(shrm->ndev); } } else { dev_err(shrm->dev, "Invalid boot state\n"); } /* start timer here */ hrtimer_start(&timer, ktime_set(0, 25*NSEC_PER_MSEC), HRTIMER_MODE_REL); atomic_dec(&ac_sleep_disable_count); dev_dbg(shrm->dev, "%s OUT\n", __func__); } void shm_ac_read_notif_1_tasklet(unsigned long tasklet_data) { struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data; u32 writer_local_rptr; u32 writer_local_wptr; u32 shared_wptr; dev_dbg(shrm->dev, "%s IN\n", __func__); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown\n", __func__); return; } /* Update writer_local_rptr(with shared_rptr) */ update_ac_audio_local_rptr(shrm); get_writer_pointers(AUDIO_CHANNEL, &writer_local_rptr, &writer_local_wptr, &shared_wptr); if (boot_state != BOOT_DONE) { dev_err(shrm->dev, "Error Case in boot state\n"); return; } if (writer_local_rptr != writer_local_wptr) { shrm_audio_tx_state = SHRM_PTR_FREE; queue_kthread_work(&shrm->shm_audio_ch_wr_kw, &shrm->send_ac_msg_pend_notify_1); } else { shrm_audio_tx_state = SHRM_IDLE; } /* start timer here */ hrtimer_start(&timer, ktime_set(0, 25*NSEC_PER_MSEC), HRTIMER_MODE_REL); atomic_dec(&ac_sleep_disable_count); atomic_dec(&ac_msg_pend_1); dev_dbg(shrm->dev, "%s OUT\n", __func__); } void shm_ca_sleep_req_work(struct 
kthread_work *work) { unsigned long flags; struct shrm_dev *shrm = container_of(work, struct shrm_dev, shm_ca_sleep_req); dev_dbg(shrm->dev, "%s:IRQ_PRCMU_CA_SLEEP\n", __func__); local_irq_save(flags); preempt_disable(); if ((boot_state != BOOT_DONE) || (readl(shm_dev->ca_reset_status_rptr))) { dev_err(shm_dev->dev, "%s:Modem state reset or unknown\n", __func__); preempt_enable(); local_irq_restore(flags); return; } shrm_common_rx_state = SHRM_IDLE; shrm_audio_rx_state = SHRM_IDLE; if (!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shrm->shm_mod_stuck_kw, &shrm->shm_mod_reset_req); preempt_enable(); local_irq_restore(flags); return; } trace_shrm_ca_sleep_req(0); writel((1<<GOP_CA_WAKE_ACK_BIT), shrm->intr_base + GOP_SET_REGISTER_BASE); preempt_enable(); local_irq_restore(flags); hrtimer_start(&timer, ktime_set(0, 10*NSEC_PER_MSEC), HRTIMER_MODE_REL); if (suspend_sleep_is_blocked()) { suspend_unblock_sleep(); } atomic_dec(&ac_sleep_disable_count); } void shm_ca_wake_req_work(struct kthread_work *work) { unsigned long flags; struct shrm_dev *shrm = container_of(work, struct shrm_dev, shm_ca_wake_req); /* initialize the FIFO Variables */ if (boot_state == BOOT_INIT) { /* Unlock the shared memory before accessing it */ if (open_modem_shared_memory()) { dev_err(shrm->dev, "Unable to unlock the shared memory\n"); //return; } shm_fifo_init(shrm); } mutex_lock(&ac_state_mutex); if (shm_chk_and_req_mod(shrm)) { mutex_unlock(&ac_state_mutex); return; } mutex_unlock(&ac_state_mutex); local_irq_save(flags); preempt_disable(); /* send ca_wake_ack_interrupt to CMU */ if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown\n", __func__); preempt_enable(); local_irq_restore(flags); return; } if (!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shrm->shm_mod_stuck_kw, &shrm->shm_mod_reset_req); preempt_enable(); 
local_irq_restore(flags); } trace_shrm_ca_wake_req(0); /* send ca_wake_ack_interrupt to CMU */ writel((1<<GOP_CA_WAKE_ACK_BIT), shrm->intr_base + GOP_SET_REGISTER_BASE); preempt_enable(); local_irq_restore(flags); } #ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET static int shrm_modem_reset_sequence(struct shrm_dev *shrm) { int err; unsigned long flags; hrtimer_cancel(&timer); hrtimer_cancel(&mod_stuck_timer_0); hrtimer_cancel(&mod_stuck_timer_1); hrtimer_cancel(&fifo_full_timer); atomic_set(&mod_stuck, 0); atomic_set(&fifo_full, 0); tasklet_disable_nosync(&shm_ac_read_0_tasklet); tasklet_disable_nosync(&shm_ac_read_1_tasklet); tasklet_disable_nosync(&shm_ca_0_tasklet); tasklet_disable_nosync(&shm_ca_1_tasklet); /* * keep the count to 0 so that we can bring down the line * for normal ac-wake and ac-sleep logic */ atomic_set(&ac_sleep_disable_count, 0); atomic_set(&ac_msg_pend_1, 0); /* workaround for MSR */ queue_kthread_work(&shrm->shm_ac_wake_kw, &shrm->shm_ac_wake_req); /* reset char device queues */ shrm_char_reset_queues(shrm); /* reset protocol states */ shrm_common_tx_state = SHRM_SLEEP_STATE; shrm_common_rx_state = SHRM_SLEEP_STATE; shrm_audio_tx_state = SHRM_SLEEP_STATE; shrm_audio_rx_state = SHRM_SLEEP_STATE; /* set the msr flag */ shrm->msr_flag = 1; /* multicast that modem is going to reset */ err = nl_send_multicast_message(SHRM_NL_MOD_RESET, GFP_ATOMIC); /* reset the boot state */ spin_lock_irqsave(&boot_lock, flags); boot_state = BOOT_INIT; spin_unlock_irqrestore(&boot_lock, flags); tasklet_enable(&shm_ac_read_0_tasklet); tasklet_enable(&shm_ac_read_1_tasklet); tasklet_enable(&shm_ca_0_tasklet); tasklet_enable(&shm_ca_1_tasklet); /* re-enable irqs */ enable_irq(shrm->ac_read_notif_0_irq); enable_irq(shrm->ac_read_notif_1_irq); enable_irq(shrm->ca_msg_pending_notif_0_irq); enable_irq(shrm->ca_msg_pending_notif_1_irq); enable_irq(IRQ_PRCMU_CA_WAKE); enable_irq(IRQ_PRCMU_CA_SLEEP); if (suspend_sleep_is_blocked()) { suspend_unblock_sleep(); } return err; } 
#endif static void shrm_modem_reset_callback(unsigned long tasklet_data) { struct shrm_dev *shrm = (struct shrm_dev *)tasklet_data; dev_err(shrm->dev, "Received mod_reset_req interrupt\n"); #ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET { int err; dev_info(shrm->dev, "Initiating Modem silent reset\n"); err = shrm_modem_reset_sequence(shrm); if (err) dev_err(shrm->dev, "Failed multicast of modem reset\n"); } #else dev_info(shrm->dev, "Modem in reset loop, doing System reset\n"); /* Call the PRCMU reset API */ prcmu_system_reset(SW_RESET_NO_ARGUMENT); #endif } DECLARE_TASKLET(shrm_sw_reset_callback, shrm_modem_reset_callback, IRQ_PRCMU_MODEM_SW_RESET_REQ); static irqreturn_t shrm_prcmu_irq_handler(int irq, void *data) { struct shrm_dev *shrm = data; unsigned long flags; switch (irq) { case IRQ_PRCMU_CA_WAKE: suspend_block_sleep(); if (shrm->msr_flag) atomic_set(&ac_sleep_disable_count, 0); atomic_inc(&ac_sleep_disable_count); queue_kthread_work(&shrm->shm_ca_wake_kw, &shrm->shm_ca_wake_req); break; case IRQ_PRCMU_CA_SLEEP: queue_kthread_work(&shrm->shm_ca_wake_kw, &shrm->shm_ca_sleep_req); break; case IRQ_PRCMU_MODEM_SW_RESET_REQ: /* update the boot_state */ spin_lock_irqsave(&boot_lock, flags); boot_state = BOOT_UNKNOWN; /* * put a barrier over here to make sure boot_state is updated * else, it is seen that some of already executing modem * irqs or tasklets fail the protocol checks and will ultimately * try to acces the modem causing system to hang. 
* This is particularly seen with user-space initiated modem reset */ wmb(); spin_unlock_irqrestore(&boot_lock, flags); disable_irq_nosync(shrm->ac_read_notif_0_irq); disable_irq_nosync(shrm->ac_read_notif_1_irq); disable_irq_nosync(shrm->ca_msg_pending_notif_0_irq); disable_irq_nosync(shrm->ca_msg_pending_notif_1_irq); disable_irq_nosync(IRQ_PRCMU_CA_WAKE); disable_irq_nosync(IRQ_PRCMU_CA_SLEEP); /* stop network queue */ shrm_stop_netdev(shrm->ndev); shrm_sw_reset_callback.data = (unsigned long)shrm; tasklet_schedule(&shrm_sw_reset_callback); break; default: dev_err(shrm->dev, "%s: => IRQ %d\n", __func__, irq); return IRQ_NONE; } return IRQ_HANDLED; } static void send_ac_msg_pend_notify_0_work(struct kthread_work *work) { unsigned long flags; struct shrm_dev *shrm = container_of(work, struct shrm_dev, send_ac_msg_pend_notify_0); dev_dbg(shrm->dev, "%s IN\n", __func__); update_ac_common_shared_wptr(shrm); mutex_lock(&ac_state_mutex); atomic_inc(&ac_sleep_disable_count); if (shm_chk_and_req_mod(shrm)) { mutex_unlock(&ac_state_mutex); return; } mutex_unlock(&ac_state_mutex); spin_lock_irqsave(&start_timer_lock, flags); if (!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shrm->shm_mod_stuck_kw, &shrm->shm_mod_reset_req); spin_unlock_irqrestore(&start_timer_lock, flags); return; } if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); spin_unlock_irqrestore(&start_timer_lock, flags); return; } trace_shrm_send_ac_msg_pend_notify_0(0); /* Trigger AcMsgPendingNotification to CMU */ writel((1<<GOP_COMMON_AC_MSG_PENDING_NOTIFICATION_BIT), shrm->intr_base + GOP_SET_REGISTER_BASE); /* timer to detect modem stuck or hang */ hrtimer_start(&mod_stuck_timer_0, ktime_set(MOD_STUCK_TIMEOUT, 0), HRTIMER_MODE_REL); spin_unlock_irqrestore(&start_timer_lock, flags); if (shrm_common_tx_state == SHRM_PTR_FREE) shrm_common_tx_state = SHRM_PTR_BUSY; dev_dbg(shrm->dev, "%s OUT\n", 
__func__); } static void send_ac_msg_pend_notify_1_work(struct kthread_work *work) { unsigned long flags; struct shrm_dev *shrm = container_of(work, struct shrm_dev, send_ac_msg_pend_notify_1); dev_dbg(shrm->dev, "%s IN\n", __func__); /* Update shared_wptr with writer_local_wptr) */ update_ac_audio_shared_wptr(shrm); mutex_lock(&ac_state_mutex); if (!atomic_read(&ac_msg_pend_1)) { atomic_inc(&ac_sleep_disable_count); atomic_inc(&ac_msg_pend_1); } if (shm_chk_and_req_mod(shrm)) { mutex_unlock(&ac_state_mutex); return; } mutex_unlock(&ac_state_mutex); spin_lock_irqsave(&start_timer_lock, flags); if (!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shrm->shm_mod_stuck_kw, &shrm->shm_mod_reset_req); spin_unlock_irqrestore(&start_timer_lock, flags); return; } if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); spin_unlock_irqrestore(&start_timer_lock, flags); return; } trace_shrm_send_ac_msg_pend_notify_1(0); /* Trigger AcMsgPendingNotification to CMU */ writel((1<<GOP_AUDIO_AC_MSG_PENDING_NOTIFICATION_BIT), shrm->intr_base + GOP_SET_REGISTER_BASE); /* timer to detect modem stuck or hang */ hrtimer_start(&mod_stuck_timer_1, ktime_set(MOD_STUCK_TIMEOUT, 0), HRTIMER_MODE_REL); spin_unlock_irqrestore(&start_timer_lock, flags); if (shrm_audio_tx_state == SHRM_PTR_FREE) shrm_audio_tx_state = SHRM_PTR_BUSY; dev_dbg(shrm->dev, "%s OUT\n", __func__); } void shm_nl_receive(struct sk_buff *skb) { struct nlmsghdr *nlh = NULL; int msg; dev_dbg(shm_dev->dev, "Received NL msg from user-space\n"); nlh = (struct nlmsghdr *)skb->data; msg = *((int *)(NLMSG_DATA(nlh))); switch (msg) { case SHRM_NL_MOD_QUERY_STATE: dev_dbg(shm_dev->dev, "mod-query-state from user-space\n"); nl_send_unicast_message(nlh->nlmsg_pid); break; case SHRM_NL_USER_MOD_RESET: dev_info(shm_dev->dev, "user-space inited mod-reset-req\n"); dev_info(shm_dev->dev, "PCRMU resets modem\n"); if 
(atomic_read(&mod_stuck) || atomic_read(&fifo_full)) { dev_info(shm_dev->dev, "Modem reset already in progress\n"); break; } atomic_set(&mod_stuck, 1); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_mod_reset_req); break; default: dev_err(shm_dev->dev, "Invalid NL msg from user-space\n"); break; }; } int shrm_protocol_init(struct shrm_dev *shrm, received_msg_handler common_rx_handler, received_msg_handler audio_rx_handler) { int err; struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; shm_dev = shrm; boot_state = BOOT_INIT; dev_info(shrm->dev, "IPC_ISA BOOT_INIT\n"); rx_common_handler = common_rx_handler; rx_audio_handler = audio_rx_handler; atomic_set(&ac_sleep_disable_count, 0); hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); timer.function = callback; hrtimer_init(&mod_stuck_timer_0, CLOCK_MONOTONIC, HRTIMER_MODE_REL); mod_stuck_timer_0.function = shm_mod_stuck_timeout; hrtimer_init(&mod_stuck_timer_1, CLOCK_MONOTONIC, HRTIMER_MODE_REL); mod_stuck_timer_1.function = shm_mod_stuck_timeout; hrtimer_init(&fifo_full_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); fifo_full_timer.function = shm_fifo_full_timeout; init_kthread_worker(&shrm->shm_common_ch_wr_kw); shrm->shm_common_ch_wr_kw_task = kthread_run(kthread_worker_fn, &shrm->shm_common_ch_wr_kw, "shm_common_channel_irq"); if (IS_ERR(shrm->shm_common_ch_wr_kw_task)) { dev_err(shrm->dev, "failed to create work task\n"); return -ENOMEM; } init_kthread_worker(&shrm->shm_audio_ch_wr_kw); shrm->shm_audio_ch_wr_kw_task = kthread_run(kthread_worker_fn, &shrm->shm_audio_ch_wr_kw, "shm_audio_channel_irq"); if (IS_ERR(shrm->shm_audio_ch_wr_kw_task)) { dev_err(shrm->dev, "failed to create work task\n"); err = -ENOMEM; goto free_kw1; } /* must use the FIFO scheduler as it is realtime sensitive */ sched_setscheduler(shrm->shm_audio_ch_wr_kw_task, SCHED_FIFO, &param); init_kthread_worker(&shrm->shm_ac_wake_kw); shrm->shm_ac_wake_kw_task = kthread_run(kthread_worker_fn, &shrm->shm_ac_wake_kw, 
"shm_ac_wake_req"); if (IS_ERR(shrm->shm_ac_wake_kw_task)) { dev_err(shrm->dev, "failed to create work task\n"); err = -ENOMEM; goto free_kw2; } /* must use the FIFO scheduler as it is realtime sensitive */ sched_setscheduler(shrm->shm_ac_wake_kw_task, SCHED_FIFO, &param); init_kthread_worker(&shrm->shm_ca_wake_kw); shrm->shm_ca_wake_kw_task = kthread_run(kthread_worker_fn, &shrm->shm_ca_wake_kw, "shm_ca_wake_req"); if (IS_ERR(shrm->shm_ca_wake_kw_task)) { dev_err(shrm->dev, "failed to create work task\n"); err = -ENOMEM; goto free_kw3; } /* must use the FIFO scheduler as it is realtime sensitive */ sched_setscheduler(shrm->shm_ca_wake_kw_task, SCHED_FIFO, &param); init_kthread_worker(&shrm->shm_ac_sleep_kw); shrm->shm_ac_sleep_kw_task = kthread_run(kthread_worker_fn, &shrm->shm_ac_sleep_kw, "shm_ac_sleep_req"); if (IS_ERR(shrm->shm_ac_sleep_kw_task)) { dev_err(shrm->dev, "failed to create work task\n"); err = -ENOMEM; goto free_kw4; } init_kthread_worker(&shrm->shm_mod_stuck_kw); shrm->shm_mod_stuck_kw_task = kthread_run(kthread_worker_fn, &shrm->shm_mod_stuck_kw, "shm_mod_reset_req"); if (IS_ERR(shrm->shm_mod_stuck_kw_task)) { dev_err(shrm->dev, "failed to create work task\n"); err = -ENOMEM; goto free_kw5; } init_kthread_work(&shrm->send_ac_msg_pend_notify_0, send_ac_msg_pend_notify_0_work); init_kthread_work(&shrm->send_ac_msg_pend_notify_1, send_ac_msg_pend_notify_1_work); init_kthread_work(&shrm->shm_ca_wake_req, shm_ca_wake_req_work); init_kthread_work(&shrm->shm_ca_sleep_req, shm_ca_sleep_req_work); init_kthread_work(&shrm->shm_ac_sleep_req, shm_ac_sleep_req_work); init_kthread_work(&shrm->shm_ac_wake_req, shm_ac_wake_req_work); init_kthread_work(&shrm->shm_mod_reset_req, shm_mod_reset_req_work); init_kthread_work(&shrm->shm_print_dbg_info, shm_print_dbg_info_work); init_kthread_work(&shrm->shm_clk_disable_req, shm_sysclk_disable_work); /* set tasklet data */ shm_ca_0_tasklet.data = (unsigned long)shrm; shm_ca_1_tasklet.data = (unsigned long)shrm; err = 
request_irq(IRQ_PRCMU_CA_SLEEP, shrm_prcmu_irq_handler, IRQF_NO_SUSPEND, "ca-sleep", shrm); if (err < 0) { dev_err(shm_dev->dev, "Failed alloc IRQ_PRCMU_CA_SLEEP.\n"); goto free_kw6; } err = request_irq(IRQ_PRCMU_CA_WAKE, shrm_prcmu_irq_handler, IRQF_NO_SUSPEND, "ca-wake", shrm); if (err < 0) { dev_err(shm_dev->dev, "Failed alloc IRQ_PRCMU_CA_WAKE.\n"); goto drop2; } err = request_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, shrm_prcmu_irq_handler, IRQF_NO_SUSPEND, "modem-sw-reset-req", shrm); if (err < 0) { dev_err(shm_dev->dev, "Failed alloc IRQ_PRCMU_MODEM_SW_RESET_REQ.\n"); goto drop1; } #ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET /* init netlink socket for user-space communication */ shrm_nl_sk = netlink_kernel_create(NULL, NETLINK_SHRM, 1, shm_nl_receive, NULL, THIS_MODULE); if (!shrm_nl_sk) { dev_err(shm_dev->dev, "netlink socket creation failed\n"); goto drop; } #endif return 0; #ifdef CONFIG_U8500_SHRM_MODEM_SILENT_RESET drop: free_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, NULL); #endif drop1: free_irq(IRQ_PRCMU_CA_WAKE, NULL); drop2: free_irq(IRQ_PRCMU_CA_SLEEP, NULL); free_kw6: kthread_stop(shrm->shm_mod_stuck_kw_task); free_kw5: kthread_stop(shrm->shm_ac_sleep_kw_task); free_kw4: kthread_stop(shrm->shm_ca_wake_kw_task); free_kw3: kthread_stop(shrm->shm_ac_wake_kw_task); free_kw2: kthread_stop(shrm->shm_audio_ch_wr_kw_task); free_kw1: kthread_stop(shrm->shm_common_ch_wr_kw_task); return err; } void shrm_protocol_deinit(struct shrm_dev *shrm) { free_irq(IRQ_PRCMU_CA_SLEEP, NULL); free_irq(IRQ_PRCMU_CA_WAKE, NULL); free_irq(IRQ_PRCMU_MODEM_SW_RESET_REQ, NULL); flush_kthread_worker(&shrm->shm_common_ch_wr_kw); flush_kthread_worker(&shrm->shm_audio_ch_wr_kw); flush_kthread_worker(&shrm->shm_ac_wake_kw); flush_kthread_worker(&shrm->shm_ca_wake_kw); flush_kthread_worker(&shrm->shm_ac_sleep_kw); flush_kthread_worker(&shrm->shm_mod_stuck_kw); kthread_stop(shrm->shm_common_ch_wr_kw_task); kthread_stop(shrm->shm_audio_ch_wr_kw_task); kthread_stop(shrm->shm_ac_wake_kw_task); 
kthread_stop(shrm->shm_ca_wake_kw_task); kthread_stop(shrm->shm_ac_sleep_kw_task); kthread_stop(shrm->shm_mod_stuck_kw_task); modem_put(shrm->modem); } int get_ca_wake_req_state(void) { return ((atomic_read(&ac_sleep_disable_count) > 0) || modem_get_usage(shm_dev->modem)); } irqreturn_t ca_wake_irq_handler(int irq, void *ctrlr) { struct shrm_dev *shrm = ctrlr; dev_dbg(shrm->dev, "%s IN\n", __func__); /* initialize the FIFO Variables */ if (boot_state == BOOT_INIT) shm_fifo_init(shrm); dev_dbg(shrm->dev, "Inside ca_wake_irq_handler\n"); trace_shrm_wake_irq_handler(0); /* Clear the interrupt */ writel((1 << GOP_CA_WAKE_REQ_BIT), shrm->intr_base + GOP_CLEAR_REGISTER_BASE); /* send ca_wake_ack_interrupt to CMU */ writel((1 << GOP_CA_WAKE_ACK_BIT), shrm->intr_base + GOP_SET_REGISTER_BASE); dev_dbg(shrm->dev, "%s OUT\n", __func__); return IRQ_HANDLED; } irqreturn_t ac_read_notif_0_irq_handler(int irq, void *ctrlr) { unsigned long flags; struct shrm_dev *shrm = ctrlr; dev_dbg(shrm->dev, "%s IN\n", __func__); /* Cancel the modem stuck timer */ spin_lock_irqsave(&start_timer_lock, flags); hrtimer_cancel(&mod_stuck_timer_0); spin_unlock_irqrestore(&start_timer_lock, flags); if (atomic_read(&fifo_full)) { atomic_set(&fifo_full, 0); hrtimer_cancel(&fifo_full_timer); } if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); return IRQ_NONE; } shm_ac_read_0_tasklet.data = (unsigned long)shrm; tasklet_schedule(&shm_ac_read_0_tasklet); local_irq_save(flags); preempt_disable(); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); preempt_enable(); local_irq_restore(flags); return IRQ_NONE; } if (!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_mod_reset_req); preempt_enable(); local_irq_restore(flags); return IRQ_NONE; } /* Clear the interrupt */ writel((1 << 
GOP_COMMON_AC_READ_NOTIFICATION_BIT), shrm->intr_base + GOP_CLEAR_REGISTER_BASE); preempt_enable(); local_irq_restore(flags); dev_dbg(shrm->dev, "%s OUT\n", __func__); return IRQ_HANDLED; } irqreturn_t ac_read_notif_1_irq_handler(int irq, void *ctrlr) { unsigned long flags; struct shrm_dev *shrm = ctrlr; dev_dbg(shrm->dev, "%s IN+\n", __func__); /* Cancel the modem stuck timer */ spin_lock_irqsave(&start_timer_lock, flags); hrtimer_cancel(&mod_stuck_timer_1); spin_unlock_irqrestore(&start_timer_lock, flags); if (atomic_read(&fifo_full)) { atomic_set(&fifo_full, 0); hrtimer_cancel(&fifo_full_timer); } if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); return IRQ_NONE; } shm_ac_read_1_tasklet.data = (unsigned long)shrm; tasklet_schedule(&shm_ac_read_1_tasklet); local_irq_save(flags); preempt_disable(); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); preempt_enable(); local_irq_restore(flags); return IRQ_NONE; } if (!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_mod_reset_req); preempt_enable(); local_irq_restore(flags); return IRQ_NONE; } /* Clear the interrupt */ writel((1 << GOP_AUDIO_AC_READ_NOTIFICATION_BIT), shrm->intr_base + GOP_CLEAR_REGISTER_BASE); preempt_enable(); local_irq_restore(flags); dev_dbg(shrm->dev, "%s OUT\n", __func__); return IRQ_HANDLED; } irqreturn_t ca_msg_pending_notif_0_irq_handler(int irq, void *ctrlr) { unsigned long flags; struct shrm_dev *shrm = ctrlr; dev_dbg(shrm->dev, "%s IN\n", __func__); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); return IRQ_NONE; } tasklet_schedule(&shm_ca_0_tasklet); local_irq_save(flags); preempt_disable(); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); preempt_enable(); 
local_irq_restore(flags); return IRQ_NONE; } if (!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_mod_reset_req); preempt_enable(); local_irq_restore(flags); return IRQ_NONE; } trace_shrm_ca_msg_pending_notify_0_irq_handler(0); /* Clear the interrupt */ writel((1 << GOP_COMMON_CA_MSG_PENDING_NOTIFICATION_BIT), shrm->intr_base + GOP_CLEAR_REGISTER_BASE); preempt_enable(); local_irq_restore(flags); dev_dbg(shrm->dev, "%s OUT\n", __func__); return IRQ_HANDLED; } irqreturn_t ca_msg_pending_notif_1_irq_handler(int irq, void *ctrlr) { unsigned long flags; struct shrm_dev *shrm = ctrlr; dev_dbg(shrm->dev, "%s IN\n", __func__); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); return IRQ_NONE; } tasklet_schedule(&shm_ca_1_tasklet); local_irq_save(flags); preempt_disable(); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); preempt_enable(); local_irq_restore(flags); return IRQ_NONE; } if (!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_mod_reset_req); preempt_enable(); local_irq_restore(flags); return IRQ_NONE; } trace_shrm_ca_msg_pending_notify_1_irq_handler(0); /* Clear the interrupt */ writel((1<<GOP_AUDIO_CA_MSG_PENDING_NOTIFICATION_BIT), shrm->intr_base+GOP_CLEAR_REGISTER_BASE); preempt_enable(); local_irq_restore(flags); dev_dbg(shrm->dev, "%s OUT\n", __func__); return IRQ_HANDLED; } /** * shm_write_msg() - write message to shared memory * @shrm: pointer to the shrm device information structure * @l2_header: L2 header * @addr: pointer to the message * @length: length of the message to be written * * This function is called from net or char interface driver write operation. 
* Prior to calling this function the message is copied from the user space * buffer to the kernel buffer. This function based on the l2 header routes * the message to the respective channel and FIFO. Then makes a call to the * fifo write function where the message is written to the physical device. */ int shm_write_msg(struct shrm_dev *shrm, u8 l2_header, void *addr, u32 length) { u8 channel = 0; int ret, i; u8 *temp; dev_dbg(shrm->dev, "%s IN\n", __func__); if (boot_state != BOOT_DONE) { dev_err(shrm->dev, "error:after boot done call this fn, L2Header = %d\n", l2_header); dev_err(shrm->dev, "packet not sent, modem in reset"); temp = (u8 *)addr; for (i = 0; i < length; i++) dev_dbg(shrm->dev, "data[%d]=%02x\t", i, temp[i]); dev_dbg(shrm->dev, "\n"); /* * If error is returned then phonet tends to resend the msg * this will lead to the msg bouncing to and fro between * phonet and shrm, hence dont return error. */ ret = 0; goto out; } if ((l2_header == L2_HEADER_ISI) || (l2_header == L2_HEADER_RPC) || (l2_header == L2_HEADER_SECURITY) || (l2_header == L2_HEADER_COMMON_SIMPLE_LOOPBACK) || (l2_header == L2_HEADER_COMMON_ADVANCED_LOOPBACK) || (l2_header == L2_HEADER_CIQ) || (l2_header == L2_HEADER_RTC_CALIBRATION)) { channel = 0; if (shrm_common_tx_state == SHRM_SLEEP_STATE) shrm_common_tx_state = SHRM_PTR_FREE; else if (shrm_common_tx_state == SHRM_IDLE) shrm_common_tx_state = SHRM_PTR_FREE; } else if ((l2_header == L2_HEADER_AUDIO) || (l2_header == L2_HEADER_AUDIO_SIMPLE_LOOPBACK) || (l2_header == L2_HEADER_AUDIO_ADVANCED_LOOPBACK)) { if (shrm_audio_tx_state == SHRM_SLEEP_STATE) shrm_audio_tx_state = SHRM_PTR_FREE; else if (shrm_audio_tx_state == SHRM_IDLE) shrm_audio_tx_state = SHRM_PTR_FREE; channel = 1; } else { ret = -ENODEV; goto out; } ret = shm_write_msg_to_fifo(shrm, channel, l2_header, addr, length); if (ret < 0) { dev_err(shrm->dev, "write message to fifo failed\n"); if (ret == -EAGAIN) { if (!atomic_read(&fifo_full)) { /* Start a timer so as to handle this 
gently */ atomic_set(&fifo_full, 1); hrtimer_start(&fifo_full_timer, ktime_set( FIFO_FULL_TIMEOUT, 0), HRTIMER_MODE_REL); } } return ret; } /* * notify only if new msg copied is the only unread one * otherwise it means that reading process is ongoing */ if (is_the_only_one_unread_message(shrm, channel, length)) { /* Send Message Pending Noitication to CMT */ if (channel == 0) queue_kthread_work(&shrm->shm_common_ch_wr_kw, &shrm->send_ac_msg_pend_notify_0); else queue_kthread_work(&shrm->shm_audio_ch_wr_kw, &shrm->send_ac_msg_pend_notify_1); } dev_dbg(shrm->dev, "%s OUT\n", __func__); return 0; out: return ret; } void ca_msg_read_notification_0(struct shrm_dev *shrm) { unsigned long flags; dev_dbg(shrm->dev, "%s IN\n", __func__); if (get_ca_msg_0_read_notif_send() == 0) { update_ca_common_shared_rptr(shrm); local_irq_save(flags); preempt_disable(); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); preempt_enable(); local_irq_restore(flags); return; } if (!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_mod_reset_req); preempt_enable(); local_irq_restore(flags); return; } trace_shrm_ca_msg_read_notification_0(0); /* Trigger CaMsgReadNotification to CMU */ writel((1 << GOP_COMMON_CA_READ_NOTIFICATION_BIT), shrm->intr_base + GOP_SET_REGISTER_BASE); preempt_enable(); local_irq_restore(flags); set_ca_msg_0_read_notif_send(1); shrm_common_rx_state = SHRM_PTR_BUSY; } dev_dbg(shrm->dev, "%s OUT\n", __func__); } void ca_msg_read_notification_1(struct shrm_dev *shrm) { unsigned long flags; dev_dbg(shrm->dev, "%s IN\n", __func__); if (get_ca_msg_1_read_notif_send() == 0) { update_ca_audio_shared_rptr(shrm); local_irq_save(flags); preempt_disable(); if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); preempt_enable(); local_irq_restore(flags); return; } if 
(!get_host_accessport_val()) { dev_err(shrm->dev, "%s: host_accessport is low\n", __func__); queue_kthread_work(&shm_dev->shm_mod_stuck_kw, &shm_dev->shm_mod_reset_req); preempt_enable(); local_irq_restore(flags); return; } trace_shrm_ca_msg_read_notification_1(0); /* Trigger CaMsgReadNotification to CMU */ writel((1<<GOP_AUDIO_CA_READ_NOTIFICATION_BIT), shrm->intr_base+GOP_SET_REGISTER_BASE); preempt_enable(); local_irq_restore(flags); set_ca_msg_1_read_notif_send(1); shrm_audio_rx_state = SHRM_PTR_BUSY; } dev_dbg(shrm->dev, "%s OUT\n", __func__); } /** * receive_messages_common - receive common channnel msg from * CMT(Cellular Mobile Terminal) * @shrm: pointer to shrm device information structure * * The messages sent from CMT to APE are written to the respective FIFO * and an interrupt is triggered by the CMT. This ca message pending * interrupt calls this function. This function sends a read notification * acknowledgement to the CMT and calls the common channel receive handler * where the messsage is copied to the respective(ISI, RPC, SECURIT) queue * based on the message l2 header. 
*/ void receive_messages_common(struct shrm_dev *shrm) { u8 l2_header; u32 len; if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); return; } l2_header = read_one_l2msg_common(shrm, recieve_common_msg, &len); /* Send Recieve_Call_back to Upper Layer */ if (!rx_common_handler) { dev_err(shrm->dev, "common_rx_handler is Null\n"); BUG(); } (*rx_common_handler)(l2_header, &recieve_common_msg, len, shrm); /* SendReadNotification */ ca_msg_read_notification_0(shrm); while (read_remaining_messages_common()) { if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); return; } l2_header = read_one_l2msg_common(shrm, recieve_common_msg, &len); /* Send Recieve_Call_back to Upper Layer */ (*rx_common_handler)(l2_header, &recieve_common_msg, len, shrm); } } /** * receive_messages_audio() - receive audio message from CMT * @shrm: pointer to shrm device information structure * * The messages sent from CMT to APE are written to the respective FIFO * and an interrupt is triggered by the CMT. This ca message pending * interrupt calls this function. This function sends a read notification * acknowledgement to the CMT and calls the common channel receive handler * where the messsage is copied to the audio queue. 
*/ void receive_messages_audio(struct shrm_dev *shrm) { u8 l2_header; u32 len; if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); return; } l2_header = read_one_l2msg_audio(shrm, recieve_audio_msg, &len); /* Send Recieve_Call_back to Upper Layer */ if (!rx_audio_handler) { dev_crit(shrm->dev, "audio_rx_handler is Null\n"); BUG(); } (*rx_audio_handler)(l2_header, &recieve_audio_msg, len, shrm); /* SendReadNotification */ ca_msg_read_notification_1(shrm); while (read_remaining_messages_audio()) { if (check_modem_in_reset(shrm)) { dev_err(shrm->dev, "%s:Modem state reset or unknown.\n", __func__); return; } l2_header = read_one_l2msg_audio(shrm, recieve_audio_msg, &len); /* Send Recieve_Call_back to Upper Layer */ (*rx_audio_handler)(l2_header, &recieve_audio_msg, len, shrm); } } u8 get_boot_state() { return boot_state; }
gpl-2.0
mhugo/QGIS
src/providers/oracle/qgsoraclefeatureiterator.cpp
7
18485
/*************************************************************************** qgsoraclefeatureiterator.cpp - Oracle feature iterator --------------------- begin : December 2012 copyright : (C) 2012 by Juergen E. Fischer email : jef at norbit dot de *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgsoraclefeatureiterator.h" #include "qgsoracleprovider.h" #include "qgsoracleconnpool.h" #include "qgsoracleexpressioncompiler.h" #include "qgsoracletransaction.h" #include "qgslogger.h" #include "qgsmessagelog.h" #include "qgsgeometry.h" #include "qgssettings.h" #include "qgsexception.h" #include <QObject> QgsOracleFeatureIterator::QgsOracleFeatureIterator( QgsOracleFeatureSource *source, bool ownSource, const QgsFeatureRequest &request ) : QgsAbstractFeatureIteratorFromSource<QgsOracleFeatureSource>( source, ownSource, request ) { if ( !source->mTransactionConnection ) { mConnection = QgsOracleConnPool::instance()->acquireConnection( QgsOracleConn::toPoolName( mSource->mUri ), request.timeout(), request.requestMayBeNested() ); } else { mConnection = source->mTransactionConnection; mIsTransactionConnection = true; } if ( !mConnection ) { close(); return; } if ( mRequest.destinationCrs().isValid() && mRequest.destinationCrs() != mSource->mCrs ) { mTransform = QgsCoordinateTransform( mSource->mCrs, mRequest.destinationCrs(), mRequest.transformContext() ); } try { mFilterRect = filterRectToSourceCrs( mTransform ); } catch ( QgsCsException & ) { // can't reproject mFilterRect mClosed = true; return; } QVariantList args; mQry = QSqlQuery( *mConnection ); if ( mRequest.flags() & 
QgsFeatureRequest::SubsetOfAttributes ) { mAttributeList = mRequest.subsetOfAttributes(); // ensure that all attributes required for expression filter are being fetched if ( mRequest.filterType() == QgsFeatureRequest::FilterExpression ) { const auto constReferencedColumns = mRequest.filterExpression()->referencedColumns(); for ( const QString &field : constReferencedColumns ) { int attrIdx = mSource->mFields.lookupField( field ); if ( !mAttributeList.contains( attrIdx ) ) mAttributeList << attrIdx; } } // ensure that all attributes required for order by are fetched const QSet< QString > orderByAttributes = mRequest.orderBy().usedAttributes(); for ( const QString &attr : orderByAttributes ) { int attrIndex = mSource->mFields.lookupField( attr ); if ( !mAttributeList.contains( attrIndex ) ) mAttributeList << attrIndex; } } else mAttributeList = mSource->mFields.allAttributesList(); bool limitAtProvider = ( mRequest.limit() >= 0 ); QString whereClause; if ( !mSource->mGeometryColumn.isNull() ) { // fetch geometry if requested mFetchGeometry = ( mRequest.flags() & QgsFeatureRequest::NoGeometry ) == 0; if ( mRequest.filterType() == QgsFeatureRequest::FilterExpression && mRequest.filterExpression()->needsGeometry() ) { mFetchGeometry = true; } if ( !mFilterRect.isNull() ) { // sdo_filter requires spatial index if ( mSource->mHasSpatialIndex ) { QString bbox = QStringLiteral( "mdsys.sdo_geometry(2003,?,NULL," "mdsys.sdo_elem_info_array(1,1003,3)," "mdsys.sdo_ordinate_array(?,?,?,?)" ")" ); whereClause = QStringLiteral( "sdo_filter(%1,%2)='TRUE'" ) .arg( QgsOracleProvider::quotedIdentifier( mSource->mGeometryColumn ), bbox ); args << ( mSource->mSrid < 1 ? 
QVariant( QVariant::Int ) : mSource->mSrid ) << mFilterRect.xMinimum() << mFilterRect.yMinimum() << mFilterRect.xMaximum() << mFilterRect.yMaximum(); if ( ( mRequest.flags() & QgsFeatureRequest::ExactIntersect ) != 0 ) { // sdo_relate requires Spatial if ( mConnection->hasSpatial() ) { whereClause += QStringLiteral( " AND sdo_relate(%1,%2,'mask=ANYINTERACT')='TRUE'" ) .arg( QgsOracleProvider::quotedIdentifier( mSource->mGeometryColumn ), bbox ); args << ( mSource->mSrid < 1 ? QVariant( QVariant::Int ) : mSource->mSrid ) << mFilterRect.xMinimum() << mFilterRect.yMinimum() << mFilterRect.xMaximum() << mFilterRect.yMaximum(); } else { // request geometry to do exact intersect in fetchFeature mFetchGeometry = true; } } } else { // request geometry to do bbox intersect in fetchFeature mFetchGeometry = true; } } } else if ( !mFilterRect.isNull() ) { QgsDebugMsg( QStringLiteral( "filterRect without geometry ignored" ) ); } switch ( mRequest.filterType() ) { case QgsFeatureRequest::FilterFid: { QString fidWhereClause = QgsOracleUtils::whereClause( mRequest.filterFid(), mSource->mFields, mSource->mPrimaryKeyType, mSource->mPrimaryKeyAttrs, mSource->mShared, args ); whereClause = QgsOracleUtils::andWhereClauses( whereClause, fidWhereClause ); } break; case QgsFeatureRequest::FilterFids: { QString fidsWhereClause = QgsOracleUtils::whereClause( mRequest.filterFids(), mSource->mFields, mSource->mPrimaryKeyType, mSource->mPrimaryKeyAttrs, mSource->mShared, args ); whereClause = QgsOracleUtils::andWhereClauses( whereClause, fidsWhereClause ); } break; case QgsFeatureRequest::FilterNone: break; case QgsFeatureRequest::FilterExpression: //handled below break; } if ( mSource->mRequestedGeomType != QgsWkbTypes::Unknown && mSource->mRequestedGeomType != mSource->mDetectedGeomType ) { if ( !whereClause.isEmpty() ) whereClause += QStringLiteral( " AND " ); whereClause += '('; whereClause += QgsOracleConn::databaseTypeFilter( QStringLiteral( "FEATUREREQUEST" ), mSource->mGeometryColumn, 
mSource->mRequestedGeomType ); if ( mFilterRect.isNull() ) whereClause += QStringLiteral( " OR %1 IS NULL" ).arg( mSource->mGeometryColumn ); whereClause += ')'; } if ( !mSource->mSqlWhereClause.isEmpty() ) { if ( !whereClause.isEmpty() ) whereClause += QStringLiteral( " AND " ); whereClause += '(' + mSource->mSqlWhereClause + ')'; } //NOTE - must be last added! mExpressionCompiled = false; mCompileStatus = NoCompilation; QString fallbackStatement; bool useFallback = false; if ( request.filterType() == QgsFeatureRequest::FilterExpression ) { if ( QgsSettings().value( QStringLiteral( "qgis/compileExpressions" ), true ).toBool() ) { QgsOracleExpressionCompiler compiler( mSource ); QgsSqlExpressionCompiler::Result result = compiler.compile( mRequest.filterExpression() ); if ( result == QgsSqlExpressionCompiler::Complete || result == QgsSqlExpressionCompiler::Partial ) { fallbackStatement = whereClause; useFallback = true; whereClause = QgsOracleUtils::andWhereClauses( whereClause, compiler.result() ); //if only partial success when compiling expression, we need to double-check results using QGIS' expressions mExpressionCompiled = ( result == QgsSqlExpressionCompiler::Complete ); mCompileStatus = ( mExpressionCompiled ? Compiled : PartiallyCompiled ); limitAtProvider = mExpressionCompiled; } else { limitAtProvider = false; } } else { limitAtProvider = false; } } if ( !mRequest.orderBy().isEmpty() ) { limitAtProvider = false; } if ( mRequest.limit() >= 0 && limitAtProvider ) { if ( !whereClause.isEmpty() ) whereClause += QStringLiteral( " AND " ); whereClause += QStringLiteral( "rownum<=?" ); fallbackStatement += QStringLiteral( "rownum<=?" 
); args << QVariant::fromValue( mRequest.limit() ); } bool result = openQuery( whereClause, args, !useFallback ); if ( !result && useFallback ) { result = openQuery( fallbackStatement, args ); if ( result ) { mExpressionCompiled = false; mCompileStatus = NoCompilation; } } } QgsOracleFeatureIterator::~QgsOracleFeatureIterator() { close(); } bool QgsOracleFeatureIterator::nextFeatureFilterExpression( QgsFeature &f ) { if ( !mExpressionCompiled ) return QgsAbstractFeatureIterator::nextFeatureFilterExpression( f ); else return fetchFeature( f ); } bool QgsOracleFeatureIterator::fetchFeature( QgsFeature &feature ) { feature.setValid( false ); if ( !mQry.isActive() ) return false; for ( ;; ) { feature.initAttributes( mSource->mFields.count() ); feature.clearGeometry(); feature.setValid( false ); if ( mRewind ) { mRewind = false; if ( !execQuery( mSql, mArgs, 1 ) ) { QgsMessageLog::logMessage( QObject::tr( "Fetching features failed.\nSQL: %1\nError: %2" ) .arg( mQry.lastQuery(), mQry.lastError().text() ), QObject::tr( "Oracle" ) ); return false; } } if ( !mQry.next() ) { return false; } int col = 0; if ( mFetchGeometry ) { QByteArray ba( mQry.value( col++ ).toByteArray() ); if ( ba.size() > 0 ) { QgsGeometry g; g.fromWkb( ba ); feature.setGeometry( g ); } else { feature.clearGeometry(); } if ( !mFilterRect.isNull() ) { if ( !feature.hasGeometry() ) { QgsDebugMsgLevel( QStringLiteral( "no geometry to intersect" ), 4 ); continue; } if ( ( mRequest.flags() & QgsFeatureRequest::ExactIntersect ) == 0 ) { // even if we could use sdo_filter earlier, we still need to double-check the results // as sdo_filter can return results outside the filter (it's only a first-pass // filtering operation!) 
// only want features which intersect with bbox if ( !feature.geometry().boundingBox().intersects( mFilterRect ) ) { // skip feature that don't intersect with our rectangle QgsDebugMsgLevel( QStringLiteral( "no bbox intersect" ), 4 ); continue; } } else if ( !mConnection->hasSpatial() || !mSource->mHasSpatialIndex ) { // couldn't use sdo_relate earlier if ( !feature.geometry().intersects( mFilterRect ) ) { // skip feature that don't intersect with our rectangle QgsDebugMsgLevel( QStringLiteral( "no exact intersect" ), 4 ); continue; } } } } QgsFeatureId fid = 0; switch ( mSource->mPrimaryKeyType ) { case PktInt: // get 64bit integer from result fid = mQry.value( col++ ).toLongLong(); if ( mAttributeList.contains( mSource->mPrimaryKeyAttrs.value( 0 ) ) ) feature.setAttribute( mSource->mPrimaryKeyAttrs.value( 0 ), fid ); break; case PktRowId: case PktFidMap: { QVariantList primaryKeyVals; if ( mSource->mPrimaryKeyType == PktFidMap ) { Q_FOREACH ( int idx, mSource->mPrimaryKeyAttrs ) { QgsField fld = mSource->mFields.at( idx ); QVariant v = mQry.value( col ); if ( v.type() != fld.type() ) v = QgsVectorDataProvider::convertValue( fld.type(), v.toString() ); primaryKeyVals << v; if ( mAttributeList.contains( idx ) ) feature.setAttribute( idx, v ); col++; } } else { primaryKeyVals << mQry.value( col++ ); } fid = mSource->mShared->lookupFid( primaryKeyVals ); } break; case PktUnknown: Q_ASSERT( !"FAILURE: cannot get feature with unknown primary key" ); return false; } feature.setId( fid ); QgsDebugMsgLevel( QStringLiteral( "fid=%1" ).arg( fid ), 5 ); // iterate attributes const auto constMAttributeList = mAttributeList; for ( int idx : constMAttributeList ) { if ( mSource->mPrimaryKeyAttrs.contains( idx ) ) continue; QgsField fld = mSource->mFields.at( idx ); QVariant v = mQry.value( col ); if ( fld.type() == QVariant::ByteArray && fld.typeName().endsWith( QStringLiteral( ".SDO_GEOMETRY" ) ) ) { QByteArray ba( v.toByteArray() ); if ( ba.size() > 0 ) { QgsGeometry g; 
g.fromWkb( ba ); v = g.asWkt(); } else { v = QVariant( QVariant::String ); } } else if ( v.type() != fld.type() ) v = QgsVectorDataProvider::convertValue( fld.type(), v.toString() ); feature.setAttribute( idx, v ); col++; } feature.setValid( true ); feature.setFields( mSource->mFields ); // allow name-based attribute lookups geometryToDestinationCrs( feature, mTransform ); return true; } } bool QgsOracleFeatureIterator::rewind() { if ( !mQry.isActive() ) return false; // move cursor to first record mRewind = true; return true; } bool QgsOracleFeatureIterator::close() { if ( mQry.isActive() ) mQry.finish(); if ( mConnection && !mIsTransactionConnection ) QgsOracleConnPool::instance()->releaseConnection( mConnection ); mConnection = nullptr; iteratorClosed(); return true; } bool QgsOracleFeatureIterator::openQuery( const QString &whereClause, const QVariantList &args, bool showLog ) { try { QString query = QStringLiteral( "SELECT " ); QString delim; if ( mFetchGeometry ) { query += QgsOracleProvider::quotedIdentifier( mSource->mGeometryColumn ); delim = ','; } switch ( mSource->mPrimaryKeyType ) { case PktRowId: query += delim + QgsOracleProvider::quotedIdentifier( QStringLiteral( "ROWID" ) ); delim = ','; break; case PktInt: query += delim + QgsOracleProvider::quotedIdentifier( mSource->mFields.at( mSource->mPrimaryKeyAttrs[0] ).name() ); delim = ','; break; case PktFidMap: Q_FOREACH ( int idx, mSource->mPrimaryKeyAttrs ) { query += delim + mConnection->fieldExpression( mSource->mFields.at( idx ) ); delim = ','; } break; case PktUnknown: QgsDebugMsg( QStringLiteral( "Cannot query without primary key." 
) ); return false; } const auto constMAttributeList = mAttributeList; for ( int idx : constMAttributeList ) { if ( mSource->mPrimaryKeyAttrs.contains( idx ) ) continue; query += delim + mConnection->fieldExpression( mSource->mFields.at( idx ) ); } query += QStringLiteral( " FROM %1 \"FEATUREREQUEST\"" ).arg( mSource->mQuery ); if ( !whereClause.isEmpty() ) query += QStringLiteral( " WHERE %1" ).arg( whereClause ); QgsDebugMsg( QStringLiteral( "Fetch features: %1" ).arg( query ) ); mSql = query; mArgs = args; if ( !execQuery( query, args, 1 ) ) { if ( showLog ) { QgsMessageLog::logMessage( QObject::tr( "Fetching features failed.\nSQL: %1\nError: %2" ) .arg( mQry.lastQuery(), mQry.lastError().text() ), QObject::tr( "Oracle" ) ); } return false; } } catch ( QgsOracleProvider::OracleFieldNotFound ) { return false; } return true; } bool QgsOracleFeatureIterator::execQuery( const QString &query, const QVariantList &args, int retryCount ) { lock(); if ( !QgsOracleProvider::exec( mQry, query, args ) ) { unlock(); if ( retryCount != 0 ) { // If the connection has been closed try again N times in case of timeout // ORA-12170: TNS:Connect timeout occurred // Or if there is a problem with the network connectivity try again N times // ORA-03114: Not Connected to Oracle if ( mQry.lastError().number() == 12170 || mQry.lastError().number() == 3114 ) { // restart connection mConnection->reconnect(); // redo execute query return execQuery( query, args, retryCount - 1 ); } } return false; } else { unlock(); } return true; } void QgsOracleFeatureIterator::lock() { if ( mIsTransactionConnection ) mConnection->lock(); } void QgsOracleFeatureIterator::unlock() { if ( mIsTransactionConnection ) mConnection->unlock(); } // ----------- QgsOracleFeatureSource::QgsOracleFeatureSource( const QgsOracleProvider *p ) : mUri( p->mUri ) , mFields( p->mAttributeFields ) , mGeometryColumn( p->mGeometryColumn ) , mSrid( p->mSrid ) , mHasSpatialIndex( p->mHasSpatialIndex ) , mDetectedGeomType( 
p->mDetectedGeomType ) , mRequestedGeomType( p->mRequestedGeomType ) , mSqlWhereClause( p->mSqlWhereClause ) , mPrimaryKeyType( p->mPrimaryKeyType ) , mPrimaryKeyAttrs( p->mPrimaryKeyAttrs ) , mQuery( p->mQuery ) , mCrs( p->crs() ) , mShared( p->mShared ) { if ( p->mTransaction ) { mTransactionConnection = p->mTransaction->connection(); mTransactionConnection->ref(); } } QgsOracleFeatureSource::~QgsOracleFeatureSource() { if ( mTransactionConnection ) { mTransactionConnection->unref(); } } QgsFeatureIterator QgsOracleFeatureSource::getFeatures( const QgsFeatureRequest &request ) { return QgsFeatureIterator( new QgsOracleFeatureIterator( this, false, request ) ); }
gpl-2.0
VanirAOSP/kernel_samsung_golden
drivers/scsi/hpsa.c
7
127307
/* * Disk Array driver for HP Smart Array SAS controllers * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Questions/Comments/Bugfixes to iss_storagedev@hp.com * */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/pci-aspm.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/timer.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/compat.h> #include <linux/blktrace_api.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/completion.h> #include <linux/moduleparam.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <linux/cciss_ioctl.h> #include <linux/string.h> #include <linux/bitmap.h> #include <asm/atomic.h> #include <linux/kthread.h> #include "hpsa_cmd.h" #include "hpsa.h" /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' 
*/ #define HPSA_DRIVER_VERSION "2.0.2-1" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" /* How long to wait (in milliseconds) for board to go into simple mode */ #define MAX_CONFIG_WAIT 30000 #define MAX_IOCTL_CONFIG_WAIT 1000 /*define how many times we will try a command because of bus resets */ #define MAX_CMD_RETRIES 3 /* Embedded module documentation macros - see modules.h */ MODULE_AUTHOR("Hewlett-Packard Company"); MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ HPSA_DRIVER_VERSION); MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); MODULE_VERSION(HPSA_DRIVER_VERSION); MODULE_LICENSE("GPL"); static int hpsa_allow_any; module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hpsa_allow_any, "Allow hpsa driver to access unknown HP Smart Array hardware"); static int hpsa_simple_mode; module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hpsa_simple_mode, "Use 'simple mode' rather than 'performant mode'"); /* define the PCI info for the cards we can control */ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 
0x3356}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); /* board_id = Subsystem Device ID & Vendor ID * product = Marketing Name for the board * access = Address of the struct of function pointers */ static struct board_type products[] = { {0x3241103C, "Smart Array P212", &SA5_access}, {0x3243103C, "Smart Array P410", &SA5_access}, {0x3245103C, "Smart Array P410i", &SA5_access}, {0x3247103C, "Smart Array P411", &SA5_access}, {0x3249103C, "Smart Array P812", &SA5_access}, {0x324a103C, "Smart Array P712m", &SA5_access}, {0x324b103C, "Smart Array P711m", &SA5_access}, {0x3350103C, "Smart Array", &SA5_access}, {0x3351103C, "Smart Array", &SA5_access}, {0x3352103C, "Smart Array", &SA5_access}, {0x3353103C, "Smart Array", &SA5_access}, {0x3354103C, "Smart Array", &SA5_access}, {0x3355103C, "Smart Array", &SA5_access}, {0x3356103C, "Smart Array", &SA5_access}, {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; static int number_of_controllers; static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); static void start_io(struct ctlr_info *h); #ifdef CONFIG_COMPAT static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); #endif static void cmd_free(struct ctlr_info *h, struct CommandList *c); static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); static struct CommandList *cmd_alloc(struct ctlr_info *h); static struct CommandList *cmd_special_alloc(struct ctlr_info *h); static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, int cmd_type); static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static void hpsa_scan_start(struct Scsi_Host *); static int hpsa_scan_finished(struct Scsi_Host *sh, 
unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
    int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
    struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
    struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
    int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
    void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
    u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
    unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
    void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

/* The controller pointer is stashed in the Scsi_Host's hostdata;
 * recover it from a scsi_device's host.
 */
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
    unsigned long *priv = shost_priv(sdev->host);
    return (struct ctlr_info *) *priv;
}

/* Recover the controller pointer stashed in the Scsi_Host's hostdata. */
static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
    unsigned long *priv = shost_priv(sh);
    return (struct ctlr_info *) *priv;
}

/* Inspect the command's sense data; if the sense key is UNIT ATTENTION,
 * log the specific ASC (SenseInfo[12]) and return 1, else return 0.
 */
static int check_for_unit_attention(struct ctlr_info *h,
    struct CommandList *c)
{
    if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
        return 0;

    switch (c->err_info->SenseInfo[12]) {
    case STATE_CHANGED:
        dev_warn(&h->pdev->dev, "hpsa%d: a state change "
            "detected, command retried\n", h->ctlr);
        break;
    case LUN_FAILED:
        dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
            "detected, action required\n", h->ctlr);
        break;
    case
REPORT_LUNS_CHANGED:
        dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
            "changed, action required\n", h->ctlr);
    /*
     * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
     */
        break;
    case POWER_OR_RESET:
        dev_warn(&h->pdev->dev, "hpsa%d: a power on "
            "or device reset detected\n", h->ctlr);
        break;
    case UNIT_ATTENTION_CLEARED:
        dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
            "cleared by another initiator\n", h->ctlr);
        break;
    default:
        dev_warn(&h->pdev->dev, "hpsa%d: unknown "
            "unit attention detected\n", h->ctlr);
        break;
    }
    return 1;
}

/* sysfs store: writing anything to "rescan" kicks off a host scan */
static ssize_t host_store_rescan(struct device *dev,
                 struct device_attribute *attr,
                 const char *buf, size_t count)
{
    struct ctlr_info *h;
    struct Scsi_Host *shost = class_to_shost(dev);
    h = shost_to_hba(shost);
    hpsa_scan_start(h->scsi_host);
    return count;
}

/* sysfs show: 4-character firmware revision taken from cached inquiry data */
static ssize_t host_show_firmware_revision(struct device *dev,
         struct device_attribute *attr, char *buf)
{
    struct ctlr_info *h;
    struct Scsi_Host *shost = class_to_shost(dev);
    unsigned char *fwrev;

    h = shost_to_hba(shost);
    if (!h->hba_inquiry_data)
        return 0;
    fwrev = &h->hba_inquiry_data[32];
    return snprintf(buf, 20, "%c%c%c%c\n",
        fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

/* sysfs show: current count of commands outstanding on the controller */
static ssize_t host_show_commands_outstanding(struct device *dev,
         struct device_attribute *attr, char *buf)
{
    struct Scsi_Host *shost = class_to_shost(dev);
    struct ctlr_info *h = shost_to_hba(shost);

    return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

/* sysfs show: "performant" or "simple" depending on active transport mode */
static ssize_t host_show_transport_mode(struct device *dev,
    struct device_attribute *attr, char *buf)
{
    struct ctlr_info *h;
    struct Scsi_Host *shost = class_to_shost(dev);

    h = shost_to_hba(shost);
    return snprintf(buf, 20, "%s\n",
        h->transMethod & CFGTBL_Trans_Performant ?
"performant" : "simple"); } /* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { 0x324a103C, /* Smart Array P712m */ 0x324b103C, /* SmartArray P711m */ 0x3223103C, /* Smart Array P800 */ 0x3234103C, /* Smart Array P400 */ 0x3235103C, /* Smart Array P400i */ 0x3211103C, /* Smart Array E200i */ 0x3212103C, /* Smart Array E200 */ 0x3213103C, /* Smart Array E200i */ 0x3214103C, /* Smart Array E200i */ 0x3215103C, /* Smart Array E200i */ 0x3237103C, /* Smart Array E500 */ 0x323D103C, /* Smart Array P700m */ 0x409C0E11, /* Smart Array 6400 */ 0x409D0E11, /* Smart Array 6400 EM */ }; /* List of controllers which cannot even be soft reset */ static u32 soft_unresettable_controller[] = { /* Exclude 640x boards. These are two pci devices in one slot * which share a battery backed cache module. One controls the * cache, the other accesses the cache through the one that controls * it. If we reset the one controlling the cache, the other will * likely not be happy. Just forbid resetting this conjoined mess. * The 640x isn't really supported by hpsa anyway. 
*/ 0x409C0E11, /* Smart Array 6400 */ 0x409D0E11, /* Smart Array 6400 EM */ }; static int ctlr_is_hard_resettable(u32 board_id) { int i; for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) if (unresettable_controller[i] == board_id) return 0; return 1; } static int ctlr_is_soft_resettable(u32 board_id) { int i; for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++) if (soft_unresettable_controller[i] == board_id) return 0; return 1; } static int ctlr_is_resettable(u32 board_id) { return ctlr_is_hard_resettable(board_id) || ctlr_is_soft_resettable(board_id); } static ssize_t host_show_resettable(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); } static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) { return (scsi3addr[3] & 0xC0) == 0x40; } static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", "UNKNOWN" }; #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1) static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t l = 0; unsigned char rlevel; struct ctlr_info *h; struct scsi_device *sdev; struct hpsa_scsi_dev_t *hdev; unsigned long flags; sdev = to_scsi_device(dev); h = sdev_to_hba(sdev); spin_lock_irqsave(&h->lock, flags); hdev = sdev->hostdata; if (!hdev) { spin_unlock_irqrestore(&h->lock, flags); return -ENODEV; } /* Is this even a logical drive? 
*/
    if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
        spin_unlock_irqrestore(&h->lock, flags);
        l = snprintf(buf, PAGE_SIZE, "N/A\n");
        return l;
    }

    rlevel = hdev->raid_level;
    spin_unlock_irqrestore(&h->lock, flags);
    if (rlevel > RAID_UNKNOWN)
        rlevel = RAID_UNKNOWN;
    l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
    return l;
}

/* sysfs show: the device's 8-byte SCSI3 address, printed in hex */
static ssize_t lunid_show(struct device *dev,
         struct device_attribute *attr, char *buf)
{
    struct ctlr_info *h;
    struct scsi_device *sdev;
    struct hpsa_scsi_dev_t *hdev;
    unsigned long flags;
    unsigned char lunid[8];

    sdev = to_scsi_device(dev);
    h = sdev_to_hba(sdev);
    spin_lock_irqsave(&h->lock, flags);
    hdev = sdev->hostdata;
    if (!hdev) {
        spin_unlock_irqrestore(&h->lock, flags);
        return -ENODEV;
    }
    memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
    spin_unlock_irqrestore(&h->lock, flags);
    return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
        lunid[0], lunid[1], lunid[2], lunid[3],
        lunid[4], lunid[5], lunid[6], lunid[7]);
}

/* sysfs show: the device's 16-byte device_id, printed in hex */
static ssize_t unique_id_show(struct device *dev,
         struct device_attribute *attr, char *buf)
{
    struct ctlr_info *h;
    struct scsi_device *sdev;
    struct hpsa_scsi_dev_t *hdev;
    unsigned long flags;
    unsigned char sn[16];

    sdev = to_scsi_device(dev);
    h = sdev_to_hba(sdev);
    spin_lock_irqsave(&h->lock, flags);
    hdev = sdev->hostdata;
    if (!hdev) {
        spin_unlock_irqrestore(&h->lock, flags);
        return -ENODEV;
    }
    memcpy(sn, hdev->device_id, sizeof(sn));
    spin_unlock_irqrestore(&h->lock, flags);
    return snprintf(buf, 16 * 2 + 2,
            "%02X%02X%02X%02X%02X%02X%02X%02X"
            "%02X%02X%02X%02X%02X%02X%02X%02X\n",
            sn[0], sn[1], sn[2], sn[3],
            sn[4], sn[5], sn[6], sn[7],
            sn[8], sn[9], sn[10], sn[11],
            sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
    host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
    host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
    host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
    host_show_resettable, NULL);

/* per-SCSI-device sysfs attributes */
static struct device_attribute *hpsa_sdev_attrs[] = {
    &dev_attr_raid_level,
    &dev_attr_lunid,
    &dev_attr_unique_id,
    NULL,
};

/* per-host (controller) sysfs attributes */
static struct device_attribute *hpsa_shost_attrs[] = {
    &dev_attr_rescan,
    &dev_attr_firmware_revision,
    &dev_attr_commands_outstanding,
    &dev_attr_transport_mode,
    &dev_attr_resettable,
    NULL,
};

static struct scsi_host_template hpsa_driver_template = {
    .module             = THIS_MODULE,
    .name               = "hpsa",
    .proc_name          = "hpsa",
    .queuecommand       = hpsa_scsi_queue_command,
    .scan_start         = hpsa_scan_start,
    .scan_finished      = hpsa_scan_finished,
    .change_queue_depth = hpsa_change_queue_depth,
    .this_id            = -1,
    .use_clustering     = ENABLE_CLUSTERING,
    .eh_device_reset_handler = hpsa_eh_device_reset_handler,
    .ioctl              = hpsa_ioctl,
    .slave_alloc        = hpsa_slave_alloc,
    .slave_destroy      = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
    .compat_ioctl       = hpsa_compat_ioctl,
#endif
    .sdev_attrs = hpsa_sdev_attrs,
    .shost_attrs = hpsa_shost_attrs,
};

/* Enqueuing and dequeuing functions for cmdlists.
*/ static inline void addQ(struct list_head *list, struct CommandList *c) { list_add_tail(&c->list, list); } static inline u32 next_command(struct ctlr_info *h) { u32 a; if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) return h->access.command_completed(h); if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { a = *(h->reply_pool_head); /* Next cmd in ring buffer */ (h->reply_pool_head)++; h->commands_outstanding--; } else { a = FIFO_EMPTY; } /* Check for wraparound */ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { h->reply_pool_head = h->reply_pool; h->reply_pool_wraparound ^= 1; } return a; } /* set_performant_mode: Modify the tag for cciss performant * set bit 0 for pull model, bits 3-1 for block fetch * register number */ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) { if (likely(h->transMethod & CFGTBL_Trans_Performant)) c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); } static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c) { unsigned long flags; set_performant_mode(h, c); spin_lock_irqsave(&h->lock, flags); addQ(&h->reqQ, c); h->Qdepth++; start_io(h); spin_unlock_irqrestore(&h->lock, flags); } static inline void removeQ(struct CommandList *c) { if (WARN_ON(list_empty(&c->list))) return; list_del_init(&c->list); } static inline int is_hba_lunid(unsigned char scsi3addr[]) { return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; } static inline int is_scsi_rev_5(struct ctlr_info *h) { if (!h->hba_inquiry_data) return 0; if ((h->hba_inquiry_data[2] & 0x07) == 5) return 1; return 0; } static int hpsa_find_target_lun(struct ctlr_info *h, unsigned char scsi3addr[], int bus, int *target, int *lun) { /* finds an unused bus, target, lun for a new physical device * assumes h->devlock is held */ int i, found = 0; DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA); memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3); for (i = 0; i < h->ndevices; i++) { if 
(h->dev[i]->bus == bus && h->dev[i]->target != -1)
            set_bit(h->dev[i]->target, lun_taken);
    }

    /* pick the lowest target id not already in use on this bus */
    for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
        if (!test_bit(i, lun_taken)) {
            /* *bus = 1; */
            *target = i;
            *lun = 0;
            found = 1;
            break;
        }
    }
    return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *device,
        struct hpsa_scsi_dev_t *added[], int *nadded)
{
    /* assumes h->devlock is held */
    int n = h->ndevices;
    int i;
    unsigned char addr1[8], addr2[8];
    struct hpsa_scsi_dev_t *sd;

    if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
        dev_err(&h->pdev->dev, "too many devices, some will be "
            "inaccessible.\n");
        return -1;
    }

    /* physical devices do not have lun or target assigned until now. */
    if (device->lun != -1)
        /* Logical device, lun is already assigned. */
        goto lun_assigned;

    /* If this device a non-zero lun of a multi-lun device
     * byte 4 of the 8-byte LUN addr will contain the logical
     * unit no, zero otherwise.
     */
    if (device->scsi3addr[4] == 0) {
        /* This is not a non-zero lun of a multi-lun device */
        if (hpsa_find_target_lun(h, device->scsi3addr,
            device->bus, &device->target, &device->lun) != 0)
            return -1;
        goto lun_assigned;
    }

    /* This is a non-zero lun of a multi-lun device.
     * Search through our list and find the device which
     * has the same 8 byte LUN address, excepting byte 4.
     * Assign the same bus and target for this new LUN.
     * Use the logical unit number from the firmware.
     */
    memcpy(addr1, device->scsi3addr, 8);
    addr1[4] = 0;
    for (i = 0; i < n; i++) {
        sd = h->dev[i];
        memcpy(addr2, sd->scsi3addr, 8);
        addr2[4] = 0;
        /* differ only in byte 4? */
        if (memcmp(addr1, addr2, 8) == 0) {
            device->bus = sd->bus;
            device->target = sd->target;
            device->lun = device->scsi3addr[4];
            break;
        }
    }
    if (device->lun == -1) {
        dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
            " suspect firmware bug or unsupported hardware "
            "configuration.\n");
            return -1;
    }

lun_assigned:

    h->dev[n] = device;
    h->ndevices++;
    added[*nadded] = device;
    (*nadded)++;

    /* initially, (before registering with scsi layer) we don't
     * know our hostno and we don't want to print anything first
     * time anyway (the scsi layer's inquiries will show that info)
     */
    /* if (hostno != -1) */
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
            scsi_device_type(device->devtype), hostno,
            device->bus, device->target, device->lun);
    return 0;
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
    int entry, struct hpsa_scsi_dev_t *new_entry,
    struct hpsa_scsi_dev_t *added[], int *nadded,
    struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
    /* assumes h->devlock is held */
    BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
    removed[*nremoved] = h->dev[entry];
    (*nremoved)++;

    /*
     * New physical devices won't have target/lun assigned yet
     * so we need to preserve the values in the slot we are replacing.
     */
    if (new_entry->target == -1) {
        new_entry->target = h->dev[entry]->target;
        new_entry->lun = h->dev[entry]->lun;
    }

    h->dev[entry] = new_entry;
    added[*nadded] = new_entry;
    (*nadded)++;
    dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
        scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
            new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array.
*/
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
    struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
    /* assumes h->devlock is held */
    int i;
    struct hpsa_scsi_dev_t *sd;

    BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);

    sd = h->dev[entry];
    removed[*nremoved] = h->dev[entry];
    (*nremoved)++;

    /* close the gap left by the removed entry */
    for (i = entry; i < h->ndevices-1; i++)
        h->dev[i] = h->dev[i+1];
    h->ndevices--;
    dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
        scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
        sd->lun);
}

/* byte-wise equality of two 8-byte SCSI3 addresses */
#define SCSI3ADDR_EQ(a, b) ( \
    (a)[7] == (b)[7] && \
    (a)[6] == (b)[6] && \
    (a)[5] == (b)[5] && \
    (a)[4] == (b)[4] && \
    (a)[3] == (b)[3] && \
    (a)[2] == (b)[2] && \
    (a)[1] == (b)[1] && \
    (a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
    struct hpsa_scsi_dev_t *added)
{
    /* called when scsi_add_device fails in order to re-adjust
     * h->dev[] to match the mid layer's view.
     */
    unsigned long flags;
    int i, j;

    spin_lock_irqsave(&h->lock, flags);
    for (i = 0; i < h->ndevices; i++) {
        if (h->dev[i] == added) {
            for (j = i; j < h->ndevices-1; j++)
                h->dev[j] = h->dev[j+1];
            h->ndevices--;
            break;
        }
    }
    spin_unlock_irqrestore(&h->lock, flags);
    kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
    struct hpsa_scsi_dev_t *dev2)
{
    /* we compare everything except lun and target as these
     * are not yet assigned.  Compare parts likely
     * to differ first
     */
    if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
        sizeof(dev1->scsi3addr)) != 0)
        return 0;
    if (memcmp(dev1->device_id, dev2->device_id,
        sizeof(dev1->device_id)) != 0)
        return 0;
    if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
        return 0;
    if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
        return 0;
    if (dev1->devtype != dev2->devtype)
        return 0;
    if (dev1->bus != dev2->bus)
        return 0;
    return 1;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.
 * If scsi3addr matches, but not vendor, model, serial num, etc. return
 * DEVICE_CHANGED, and return needle location in *index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
    struct hpsa_scsi_dev_t *haystack[], int haystack_size, int *index)
{
    int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
    for (i = 0; i < haystack_size; i++) {
        if (haystack[i] == NULL) /* previously removed. */
            continue;
        if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
            *index = i;
            if (device_is_the_same(needle, haystack[i]))
                return DEVICE_SAME;
            else
                return DEVICE_CHANGED;
        }
    }
    *index = -1;
    return DEVICE_NOT_FOUND;
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
    struct hpsa_scsi_dev_t *sd[], int nsds)
{
    /* sd contains scsi3 addresses and devtypes, and inquiry
     * data.  This function takes what's in sd to be the current
     * reality and updates h->dev[] to reflect that reality.
     */
    int i, entry, device_change, changes = 0;
    struct hpsa_scsi_dev_t *csd;
    unsigned long flags;
    struct hpsa_scsi_dev_t **added, **removed;
    int nadded, nremoved;
    struct Scsi_Host *sh = NULL;

    added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
        GFP_KERNEL);
    removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
        GFP_KERNEL);

    if (!added || !removed) {
        dev_warn(&h->pdev->dev, "out of memory in "
            "adjust_hpsa_scsi_table\n");
        goto free_and_out;
    }

    spin_lock_irqsave(&h->devlock, flags);

    /* find any devices in h->dev[] that are not in
     * sd[] and remove them from h->dev[], and for any
     * devices which have changed, remove the old device
     * info and add the new device info.
     */
    i = 0;
    nremoved = 0;
    nadded = 0;
    while (i < h->ndevices) {
        csd = h->dev[i];
        device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
        if (device_change == DEVICE_NOT_FOUND) {
            changes++;
            hpsa_scsi_remove_entry(h, hostno, i,
                removed, &nremoved);
            continue; /* remove ^^^, hence i not incremented */
        } else if (device_change == DEVICE_CHANGED) {
            changes++;
            hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
                added, &nadded, removed, &nremoved);
            /* Set it to NULL to prevent it from being freed
             * at the bottom of hpsa_update_scsi_devices()
             */
            sd[entry] = NULL;
        }
        i++;
    }

    /* Now, make sure every device listed in sd[] is also
     * listed in h->dev[], adding them if they aren't found
     */
    for (i = 0; i < nsds; i++) {
        if (!sd[i]) /* if already added above. */
            continue;
        device_change = hpsa_scsi_find_entry(sd[i], h->dev,
            h->ndevices, &entry);
        if (device_change == DEVICE_NOT_FOUND) {
            changes++;
            if (hpsa_scsi_add_entry(h, hostno, sd[i],
                added, &nadded) != 0)
                break;
            sd[i] = NULL; /* prevent from being freed later. */
        } else if (device_change == DEVICE_CHANGED) {
            /* should never happen... */
            changes++;
            dev_warn(&h->pdev->dev,
                "device unexpectedly changed.\n");
            /* but if it does happen, we just ignore that device */
        }
    }
    spin_unlock_irqrestore(&h->devlock, flags);

    /* Don't notify scsi mid layer of any changes the first time through
     * (or if there are no changes) scsi_scan_host will do it later the
     * first time through.
     */
    if (hostno == -1 || !changes)
        goto free_and_out;

    sh = h->scsi_host;
    /* Notify scsi mid layer of any removed devices */
    for (i = 0; i < nremoved; i++) {
        struct scsi_device *sdev =
            scsi_device_lookup(sh, removed[i]->bus,
                removed[i]->target, removed[i]->lun);
        if (sdev != NULL) {
            scsi_remove_device(sdev);
            scsi_device_put(sdev);
        } else {
            /* We don't expect to get here.
             * future cmds to this device will get selection
             * timeout as if the device was gone.
             */
            dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
                " for removal.", hostno, removed[i]->bus,
                removed[i]->target, removed[i]->lun);
        }
        kfree(removed[i]);
        removed[i] = NULL;
    }

    /* Notify scsi mid layer of any added devices */
    for (i = 0; i < nadded; i++) {
        if (scsi_add_device(sh, added[i]->bus,
            added[i]->target, added[i]->lun) == 0)
            continue;
        dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
            "device not added.\n", hostno, added[i]->bus,
            added[i]->target, added[i]->lun);
        /* now we have to remove it from h->dev,
         * since it didn't get added to scsi mid layer
         */
        fixup_botched_add(h, added[i]);
    }

free_and_out:
    kfree(added);
    kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
    int bus, int target, int lun)
{
    int i;
    struct hpsa_scsi_dev_t *sd;

    for (i = 0; i < h->ndevices; i++) {
        sd = h->dev[i];
        if (sd->bus == bus && sd->target == target && sd->lun == lun)
            return sd;
    }
    return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
    struct hpsa_scsi_dev_t *sd;
    unsigned long flags;
    struct ctlr_info *h;

    h = sdev_to_hba(sdev);
    spin_lock_irqsave(&h->devlock, flags);
    sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
        sdev_id(sdev), sdev->lun);
    if (sd != NULL)
        sdev->hostdata = sd;
    spin_unlock_irqrestore(&h->devlock, flags);
    return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
    /* nothing to do.
*/
}

/* Free the per-command scatter-gather chain buffers and the pointer array */
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
    int i;

    if (!h->cmd_sg_list)
        return;
    for (i = 0; i < h->nr_cmds; i++) {
        kfree(h->cmd_sg_list[i]);
        h->cmd_sg_list[i] = NULL;
    }
    kfree(h->cmd_sg_list);
    h->cmd_sg_list = NULL;
}

/* Allocate one SG chain buffer (h->chainsize entries) per command.
 * Returns 0 on success or -ENOMEM (freeing any partial allocation).
 */
static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
    int i;

    if (h->chainsize <= 0)
        return 0;

    h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
        GFP_KERNEL);
    if (!h->cmd_sg_list)
        return -ENOMEM;
    for (i = 0; i < h->nr_cmds; i++) {
        h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
            h->chainsize, GFP_KERNEL);
        if (!h->cmd_sg_list[i])
            goto clean;
    }
    return 0;

clean:
    hpsa_free_sg_chain_blocks(h);
    return -ENOMEM;
}

/* DMA-map the command's SG chain block and point the last in-command SG
 * descriptor at it (marked with HPSA_SG_CHAIN).
 */
static void hpsa_map_sg_chain_block(struct ctlr_info *h,
    struct CommandList *c)
{
    struct SGDescriptor *chain_sg, *chain_block;
    u64 temp64;

    chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
    chain_block = h->cmd_sg_list[c->cmdindex];
    chain_sg->Ext = HPSA_SG_CHAIN;
    chain_sg->Len = sizeof(*chain_sg) *
        (c->Header.SGTotal - h->max_cmd_sg_entries);
    temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
        PCI_DMA_TODEVICE);
    chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
    chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

/* Undo hpsa_map_sg_chain_block(); no-op if the command used no chain */
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
    struct CommandList *c)
{
    struct SGDescriptor *chain_sg;
    union u64bit temp64;

    if (c->Header.SGTotal <= h->max_cmd_sg_entries)
        return;

    chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
    temp64.val32.lower = chain_sg->Addr.lower;
    temp64.val32.upper = chain_sg->Addr.upper;
    pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

/* Completion path: translate controller/firmware status into a SCSI
 * midlayer result, copy sense data, and hand the command back.
 */
static void complete_scsi_command(struct CommandList *cp)
{
    struct scsi_cmnd *cmd;
    struct ctlr_info *h;
    struct ErrorInfo *ei;

    unsigned char sense_key;
    unsigned char asc;      /* additional sense code */
    unsigned char ascq;     /* additional sense code qualifier */
    unsigned long sense_data_size;

    ei = cp->err_info;
    cmd = (struct scsi_cmnd *) cp->scsi_cmd;
    h = cp->h;

    scsi_dma_unmap(cmd); /*
undo the DMA mappings */ if (cp->Header.SGTotal > h->max_cmd_sg_entries) hpsa_unmap_sg_chain_block(h, cp); cmd->result = (DID_OK << 16); /* host byte */ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ cmd->result |= ei->ScsiStatus; /* copy the sense data whether we need to or not. */ if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) sense_data_size = SCSI_SENSE_BUFFERSIZE; else sense_data_size = sizeof(ei->SenseInfo); if (ei->SenseLen < sense_data_size) sense_data_size = ei->SenseLen; memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); scsi_set_resid(cmd, ei->ResidualCnt); if (ei->CommandStatus == 0) { cmd->scsi_done(cmd); cmd_free(h, cp); return; } /* an error has occurred */ switch (ei->CommandStatus) { case CMD_TARGET_STATUS: if (ei->ScsiStatus) { /* Get sense key */ sense_key = 0xf & ei->SenseInfo[2]; /* Get additional sense code */ asc = ei->SenseInfo[12]; /* Get addition sense code qualifier */ ascq = ei->SenseInfo[13]; } if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { if (check_for_unit_attention(h, cp)) { cmd->result = DID_SOFT_ERROR << 16; break; } if (sense_key == ILLEGAL_REQUEST) { /* * SCSI REPORT_LUNS is commonly unsupported on * Smart Array. Suppress noisy complaint. 
*/ if (cp->Request.CDB[0] == REPORT_LUNS) break; /* If ASC/ASCQ indicate Logical Unit * Not Supported condition, */ if ((asc == 0x25) && (ascq == 0x0)) { dev_warn(&h->pdev->dev, "cp %p " "has check condition\n", cp); break; } } if (sense_key == NOT_READY) { /* If Sense is Not Ready, Logical Unit * Not ready, Manual Intervention * required */ if ((asc == 0x04) && (ascq == 0x03)) { dev_warn(&h->pdev->dev, "cp %p " "has check condition: unit " "not ready, manual " "intervention required\n", cp); break; } } if (sense_key == ABORTED_COMMAND) { /* Aborted command is retryable */ dev_warn(&h->pdev->dev, "cp %p " "has check condition: aborted command: " "ASC: 0x%x, ASCQ: 0x%x\n", cp, asc, ascq); cmd->result = DID_SOFT_ERROR << 16; break; } /* Must be some other type of check condition */ dev_warn(&h->pdev->dev, "cp %p has check condition: " "unknown type: " "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " "Returning result: 0x%x, " "cmd=[%02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x]\n", cp, sense_key, asc, ascq, cmd->result, cmd->cmnd[0], cmd->cmnd[1], cmd->cmnd[2], cmd->cmnd[3], cmd->cmnd[4], cmd->cmnd[5], cmd->cmnd[6], cmd->cmnd[7], cmd->cmnd[8], cmd->cmnd[9], cmd->cmnd[10], cmd->cmnd[11], cmd->cmnd[12], cmd->cmnd[13], cmd->cmnd[14], cmd->cmnd[15]); break; } /* Problem was not a check condition * Pass it up to the upper layers... */ if (ei->ScsiStatus) { dev_warn(&h->pdev->dev, "cp %p has status 0x%x " "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " "Returning result: 0x%x\n", cp, ei->ScsiStatus, sense_key, asc, ascq, cmd->result); } else { /* scsi status is zero??? How??? */ dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " "Returning no connection.\n", cp), /* Ordinarily, this case should never happen, * but there is a bug in some released firmware * revisions that allows it to happen if, for * example, a 4100 backplane loses power and * the tape drive is in it. 
We assume that * it's a fatal error of some kind because we * can't show that it wasn't. We will make it * look like selection timeout since that is * the most common reason for this to occur, * and it's severe enough. */ cmd->result = DID_NO_CONNECT << 16; } break; case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ break; case CMD_DATA_OVERRUN: dev_warn(&h->pdev->dev, "cp %p has" " completed with data overrun " "reported\n", cp); break; case CMD_INVALID: { /* print_bytes(cp, sizeof(*cp), 1, 0); print_cmd(cp); */ /* We get CMD_INVALID if you address a non-existent device * instead of a selection timeout (no response). You will * see this if you yank out a drive, then try to access it. * This is kind of a shame because it means that any other * CMD_INVALID (e.g. driver bug) will get interpreted as a * missing target. */ cmd->result = DID_NO_CONNECT << 16; } break; case CMD_PROTOCOL_ERR: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p has " "protocol error\n", cp); break; case CMD_HARDWARE_ERR: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); break; case CMD_CONNECTION_LOST: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); break; case CMD_ABORTED: cmd->result = DID_ABORT << 16; dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", cp, ei->ScsiStatus); break; case CMD_ABORT_FAILED: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); break; case CMD_UNSOLICITED_ABORT: cmd->result = DID_RESET << 16; dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " "abort\n", cp); break; case CMD_TIMEOUT: cmd->result = DID_TIME_OUT << 16; dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); break; case CMD_UNABORTABLE: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "Command unabortable\n"); break; default: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); } 
    cmd->scsi_done(cmd);
    cmd_free(h, cp);
}

/* Allocate, configure and register the Scsi_Host for this controller;
 * returns 0 on success or a negative errno.
 */
static int hpsa_scsi_detect(struct ctlr_info *h)
{
    struct Scsi_Host *sh;
    int error;

    /* hostdata holds just a pointer back to our ctlr_info */
    sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
    if (sh == NULL)
        goto fail;

    sh->io_port = 0;
    sh->n_io_port = 0;
    sh->this_id = -1;
    sh->max_channel = 3;
    sh->max_cmd_len = MAX_COMMAND_SIZE;
    sh->max_lun = HPSA_MAX_LUN;
    sh->max_id = HPSA_MAX_LUN;
    sh->can_queue = h->nr_cmds;
    sh->cmd_per_lun = h->nr_cmds;
    sh->sg_tablesize = h->maxsgentries;
    h->scsi_host = sh;
    sh->hostdata[0] = (unsigned long) h;
    sh->irq = h->intr[h->intr_mode];
    sh->unique_id = sh->irq;
    error = scsi_add_host(sh, &h->pdev->dev);
    if (error)
        goto fail_host_put;
    scsi_scan_host(sh);
    return 0;

fail_host_put:
    dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
        " failed for controller %d\n", h->ctlr);
    scsi_host_put(sh);
    return error;
fail:
    dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
        " failed for controller %d\n", h->ctlr);
    return -ENOMEM;
}

/* Unmap the first sg_used scatter-gather entries of a command */
static void hpsa_pci_unmap(struct pci_dev *pdev,
    struct CommandList *c, int sg_used, int data_direction)
{
    int i;
    union u64bit addr64;

    for (i = 0; i < sg_used; i++) {
        addr64.val32.lower = c->SG[i].Addr.lower;
        addr64.val32.upper = c->SG[i].Addr.upper;
        pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
            data_direction);
    }
}

/* DMA-map a single buffer into SG slot 0 of the command (or mark the
 * command as carrying no data when buflen is 0 or direction is NONE).
 */
static void hpsa_map_one(struct pci_dev *pdev,
        struct CommandList *cp,
        unsigned char *buf,
        size_t buflen,
        int data_direction)
{
    u64 addr64;

    if (buflen == 0 || data_direction == PCI_DMA_NONE) {
        cp->Header.SGList = 0;
        cp->Header.SGTotal = 0;
        return;
    }

    addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
    cp->SG[0].Addr.lower =
        (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
    cp->SG[0].Addr.upper =
        (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
    cp->SG[0].Len = buflen;
    cp->Header.SGList = (u8) 1;   /* no.
SGs contig in this cmd */
    cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

/* Submit a command and sleep until its completion fires */
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
    struct CommandList *c)
{
    DECLARE_COMPLETION_ONSTACK(wait);

    c->waiting = &wait;
    enqueue_cmd_and_start_io(h, c);
    wait_for_completion(&wait);
}

/* Submit a command, retrying up to 3 extra times while it completes with
 * a unit attention condition; unmaps the single data buffer afterwards.
 */
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
    struct CommandList *c, int data_direction)
{
    int retry_count = 0;

    do {
        memset(c->err_info, 0, sizeof(*c->err_info));
        hpsa_scsi_do_simple_cmd_core(h, c);
        retry_count++;
    } while (check_for_unit_attention(h, c) && retry_count <= 3);
    hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

/* Log a human-readable explanation of a command's error status */
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
    struct ErrorInfo *ei;
    struct device *d = &cp->h->pdev->dev;

    ei = cp->err_info;
    switch (ei->CommandStatus) {
    case CMD_TARGET_STATUS:
        dev_warn(d, "cmd %p has completed with errors\n", cp);
        dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
                ei->ScsiStatus);
        if (ei->ScsiStatus == 0)
            dev_warn(d, "SCSI status is abnormally zero.  "
            "(probably indicates selection timeout "
            "reported incorrectly due to a known "
            "firmware bug, circa July, 2001.)\n");
        break;
    case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
            dev_info(d, "UNDERRUN\n");
        break;
    case CMD_DATA_OVERRUN:
        dev_warn(d, "cp %p has completed with data overrun\n", cp);
        break;
    case CMD_INVALID: {
        /* controller unfortunately reports SCSI passthru's
         * to non-existent targets as invalid commands.
*/ dev_warn(d, "cp %p is reported invalid (probably means " "target device no longer present)\n", cp); /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); print_cmd(cp); */ } break; case CMD_PROTOCOL_ERR: dev_warn(d, "cp %p has protocol error \n", cp); break; case CMD_HARDWARE_ERR: /* cmd->result = DID_ERROR << 16; */ dev_warn(d, "cp %p had hardware error\n", cp); break; case CMD_CONNECTION_LOST: dev_warn(d, "cp %p had connection lost\n", cp); break; case CMD_ABORTED: dev_warn(d, "cp %p was aborted\n", cp); break; case CMD_ABORT_FAILED: dev_warn(d, "cp %p reports abort failed\n", cp); break; case CMD_UNSOLICITED_ABORT: dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp); break; case CMD_TIMEOUT: dev_warn(d, "cp %p timed out\n", cp); break; case CMD_UNABORTABLE: dev_warn(d, "Command unabortable\n"); break; default: dev_warn(d, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); } } static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, unsigned char page, unsigned char *buf, unsigned char bufsize) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; c = cmd_special_alloc(h); if (c == NULL) { /* trouble... */ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); return -ENOMEM; } fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); ei = c->err_info; if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { hpsa_scsi_interpret_error(c); rc = -1; } cmd_special_free(h, c); return rc; } static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) { int rc = IO_OK; struct CommandList *c; struct ErrorInfo *ei; c = cmd_special_alloc(h); if (c == NULL) { /* trouble... */ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); return -ENOMEM; } fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); hpsa_scsi_do_simple_cmd_core(h, c); /* no unmap needed here because no data xfer. 
*/ ei = c->err_info; if (ei->CommandStatus != 0) { hpsa_scsi_interpret_error(c); rc = -1; } cmd_special_free(h, c); return rc; } static void hpsa_get_raid_level(struct ctlr_info *h, unsigned char *scsi3addr, unsigned char *raid_level) { int rc; unsigned char *buf; *raid_level = RAID_UNKNOWN; buf = kzalloc(64, GFP_KERNEL); if (!buf) return; rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64); if (rc == 0) *raid_level = buf[8]; if (*raid_level > RAID_UNKNOWN) *raid_level = RAID_UNKNOWN; kfree(buf); return; } /* Get the device id from inquiry page 0x83 */ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, unsigned char *device_id, int buflen) { int rc; unsigned char *buf; if (buflen > 16) buflen = 16; buf = kzalloc(64, GFP_KERNEL); if (!buf) return -1; rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); if (rc == 0) memcpy(device_id, &buf[8], buflen); kfree(buf); return rc != 0; } static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, struct ReportLUNdata *buf, int bufsize, int extended_response) { int rc = IO_OK; struct CommandList *c; unsigned char scsi3addr[8]; struct ErrorInfo *ei; c = cmd_special_alloc(h); if (c == NULL) { /* trouble... */ dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); return -1; } /* address the controller */ memset(scsi3addr, 0, sizeof(scsi3addr)); fill_cmd(c, logical ? 
HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

/* Convenience wrapper: report physical LUNs. */
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf,
		int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

/* Convenience wrapper: report logical LUNs. */
static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

/* Record the SCSI bus/target/lun triple on @device. */
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/*
 * Interrogate the device at @scsi3addr with a standard INQUIRY and fill
 * in @this_device (type, vendor, model, unique id, RAID level).  When
 * @is_OBDR_device is non-NULL it is set if the device looks like a
 * One-Button-Disaster-Recovery tape drive.
 * Returns 0 on success, 1 on failure.
 */
static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

/* Model-name prefixes identifying MSA2xxx-family external enclosures. */
static unsigned char *msa2xxx_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	NULL,
};

/* Return 1 if @device's model string matches an MSA2xxx enclosure. */
static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; msa2xxx_model[i]; i++)
		if (strncmp(device->model, msa2xxx_model[i],
			strlen(msa2xxx_model[i])) == 0)
			return 1;
	return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
 * volumes on bus 1, physical devices on bus 2. and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, int *bus, int *target, int *lun,
	struct hpsa_scsi_dev_t *device)
{
	u32 lunid;

	if (is_logical_dev_addr_mode(lunaddrbytes)) {
		/* logical device */
		lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
		if (is_msa2xxx(h, device)) {
			/* msa2xxx way, put logicals on bus 1
			 * and match target/lun numbers box
			 * reports. */
			*bus = 1;
			*target = (lunid >> 16) & 0x3fff;
			*lun = lunid & 0x00ff;
		} else {
			if (likely(is_scsi_rev_5(h))) {
				/* All current smart arrays (circa 2011) */
				*bus = 0;
				*target = 0;
				*lun = (lunid & 0x3fff) + 1;
			} else {
				/* Traditional old smart array way. */
				*bus = 0;
				*target = lunid & 0x3fff;
				*lun = 0;
			}
		}
	} else {
		/* physical device */
		if (is_hba_lunid(lunaddrbytes))
			if (unlikely(is_scsi_rev_5(h))) {
				*bus = 0; /* put p1210m ctlr at 0,0,0 */
				*target = 0;
				*lun = 0;
				return;
			} else
				*bus = 3; /* traditional smartarray */
		else
			*bus = 2; /* physical disk */
		*target = -1;
		*lun = -1; /* we will fill these in later. */
	}
}

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the MSA2xxx boxes, we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	int bus, int target, int lun, unsigned long lunzerobits[],
	int *nmsa2xxx_enclosures)
{
	unsigned char scsi3addr[8];

	if (test_bit(target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_msa2xxx(h, tmpdevice))
		return 0; /* It's only the MSA2xxx that have this problem. */

	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

#define MAX_MSA2XXX_ENCLOSURES 32
	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
			"enclosures exceeded. Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*nmsa2xxx_enclosures)++;
	hpsa_set_bus_target_lun(this_device, bus, target, 0);
	set_bit(target, lunzerobits);
	return 1;
}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	int reportlunsize,
	struct ReportLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	/* LUNListLength is big-endian on the wire; each entry is 8 bytes */
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals +
		(raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

/*
 * Rescan the controller's physical and logical LUNs and reconcile the
 * result with the midlayer's view of SCSI host @hostno.
 */
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
*/
	struct ReportLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	/* reply buffer sized for the maximum number of 8-byte LUN entries */
	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
	int bus, target, lun;
	int raid_ctlr_position;
	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);
	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to 32 MSA2xxx enclosures, actually 8 of them
	 * but each of them 4 times through different paths.  The plus 1
	 * is for the RAID controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	/* SCSI-rev-5 controllers (p1210m) list themselves first, others last */
	if (unlikely(is_scsi_rev_5(h)))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	nmsa2xxx_enclosures = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);
		/* skip masked physical devices. */
		if (lunaddrbytes[3] & 0xC0 &&
			i < nphysicals + (raid_ctlr_position == 0))
			continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
			tmpdevice);
		this_device = currentsd[ncurrent];

		/*
		 * For the msa2xxx boxes, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
				lunaddrbytes, bus, target, lun, lunzerobits,
				&nmsa2xxx_enclosures)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;
		hpsa_set_bus_target_lun(this_device, bus, target, lun);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (i < nphysicals)	/* disks in the physical list are not exposed */
				break;
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
}

/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	unsigned int len;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i, sg_index, chained;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	curr_sg = cp->SG;
	chained = 0;
	sg_index = 0;
	scsi_for_each_sg(cmd, sg, use_sg, i) {
		/* switch to the chained SG block once the inline
		 * descriptors are exhausted
		 */
		if (i == h->max_cmd_sg_entries - 1 &&
			use_sg > h->max_cmd_sg_entries) {
			chained = 1;
			curr_sg = h->cmd_sg_list[cp->cmdindex];
			sg_index = 0;
		}
		addr64 = (u64) sg_dma_address(sg);
		len = sg_dma_len(sg);
		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
		curr_sg->Len = len;
		curr_sg->Ext = 0; /* we are not chaining */
		curr_sg++;
	}

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = (u16) (use_sg + 1);
		hpsa_map_sg_chain_block(h, cp);
		return 0;
	}

sglist_finished:
	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
	return 0;
}

/*
 * queuecommand implementation: build a CommandList for @cmd and hand it
 * to the controller; completion arrives via the interrupt handler.
 */
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	unsigned long flags;

	/* Get the ptr to our adapter structure out of cmd->host.
*/
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		/* device vanished; complete the command immediately */
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	/* Need a lock as this is being allocated from the pool */
	spin_lock_irqsave(&h->lock, flags);
	c = cmd_alloc(h);
	spin_unlock_irqrestore(&h->lock, flags);
	if (c == NULL) {		/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Fill in the command list header */
	cmd->scsi_done = done; /* save this for use by completion code */

	/* save c in case we have to abort it */
	cmd->host_scribble = (unsigned char *) c;

	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;

	/* Fill in the request block... */
	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	c->Request.Type.Type = TYPE_CMD;
	c->Request.Type.Attribute = ATTR_SIMPLE;
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.Type.Direction = XFER_WRITE;
		break;
	case DMA_FROM_DEVICE:
		c->Request.Type.Direction = XFER_READ;
		break;
	case DMA_NONE:
		c->Request.Type.Direction = XFER_NONE;
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */
		c->Request.Type.Direction = XFER_RSVD;
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */
		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		cmd_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

/* generate the host-locked hpsa_scsi_queue_command() wrapper */
static DEF_SCSI_QCMD(hpsa_scsi_queue_command)

/*
 * scan_start hook: wait for any in-progress scan to finish, then run a
 * device rescan with scan_finished cleared for the duration.
 */
static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1; /* mark scan as finished. */
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

/* scan_finished hook: report whether the current scan has completed. */
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}

/*
 * change_queue_depth hook: clamp the requested depth to [1, nr_cmds]
 * and apply it.  Only SCSI_QDEPTH_DEFAULT requests are supported.
 */
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason)
{
	struct ctlr_info *h = sdev_to_hba(sdev);

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -ENOTSUPP;

	if (qdepth < 1)
		qdepth = 1;
	else
		if (qdepth > h->nr_cmds)
			qdepth = h->nr_cmds;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

/* Tear down the Scsi_Host registered by hpsa_scsi_detect(). */
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
	/* we are being forcibly unloaded, and may not refuse. */
	scsi_remove_host(h->scsi_host);
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
}

/* Register with the SCSI midlayer; logs on failure. */
static int hpsa_register_scsi(struct ctlr_info *h)
{
	int rc;

	rc = hpsa_scsi_detect(h);
	if (rc != 0)
		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
			" hpsa_scsi_detect(), rc is %d\n", rc);
	return rc;
}

/*
 * Poll the device at @lunaddr with TEST UNIT READY (with exponential
 * backoff between attempts) until it is ready or retries are exhausted.
 * Returns 0 when ready, non-zero otherwise.
 */
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
{
	int rc = 0;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"wait_for_device_to_become_ready.\n");
		return IO_ERROR;
	}

	/* Send test unit ready until device ready, or give up. */
	while (count < HPSA_TUR_RETRY_LIMIT) {

		/* Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);
		count++;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime = waittime * 2;

		/* Send the Test Unit Ready */
		fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
		hpsa_scsi_do_simple_cmd_core(h, c);
		/* no unmap needed here because no data xfer.
*/ if (c->err_info->CommandStatus == CMD_SUCCESS) break; if (c->err_info->CommandStatus == CMD_TARGET_STATUS && c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && (c->err_info->SenseInfo[2] == NO_SENSE || c->err_info->SenseInfo[2] == UNIT_ATTENTION)) break; dev_warn(&h->pdev->dev, "waiting %d secs " "for device to become ready.\n", waittime); rc = 1; /* device not ready. */ } if (rc) dev_warn(&h->pdev->dev, "giving up on device.\n"); else dev_warn(&h->pdev->dev, "device is ready.\n"); cmd_special_free(h, c); return rc; } /* Need at least one of these error handlers to keep ../scsi/hosts.c from * complaining. Doing a host- or bus-reset can't do anything good here. */ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) { int rc; struct ctlr_info *h; struct hpsa_scsi_dev_t *dev; /* find the controller to which the command to be aborted was sent */ h = sdev_to_hba(scsicmd->device); if (h == NULL) /* paranoia */ return FAILED; dev = scsicmd->device->hostdata; if (!dev) { dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " "device lookup failed.\n"); return FAILED; } dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", h->scsi_host->host_no, dev->bus, dev->target, dev->lun); /* send a reset to the SCSI LUN which the command was sent to */ rc = hpsa_send_reset(h, dev->scsi3addr); if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) return SUCCESS; dev_warn(&h->pdev->dev, "resetting device failed.\n"); return FAILED; } /* * For operations that cannot sleep, a command block is allocated at init, * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track * which ones are free or in use. Lock must be held when calling this. * cmd_free() is the complement. 
*/ static struct CommandList *cmd_alloc(struct ctlr_info *h) { struct CommandList *c; int i; union u64bit temp64; dma_addr_t cmd_dma_handle, err_dma_handle; do { i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); if (i == h->nr_cmds) return NULL; } while (test_and_set_bit (i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); c = h->cmd_pool + i; memset(c, 0, sizeof(*c)); cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c); c->err_info = h->errinfo_pool + i; memset(c->err_info, 0, sizeof(*c->err_info)); err_dma_handle = h->errinfo_pool_dhandle + i * sizeof(*c->err_info); h->nr_allocs++; c->cmdindex = i; INIT_LIST_HEAD(&c->list); c->busaddr = (u32) cmd_dma_handle; temp64.val = (u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; c->ErrDesc.Addr.upper = temp64.val32.upper; c->ErrDesc.Len = sizeof(*c->err_info); c->h = h; return c; } /* For operations that can wait for kmalloc to possibly sleep, * this routine can be called. Lock need not be held to call * cmd_special_alloc. cmd_special_free() is the complement. 
*/ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) { struct CommandList *c; union u64bit temp64; dma_addr_t cmd_dma_handle, err_dma_handle; c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); if (c == NULL) return NULL; memset(c, 0, sizeof(*c)); c->cmdindex = -1; c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), &err_dma_handle); if (c->err_info == NULL) { pci_free_consistent(h->pdev, sizeof(*c), c, cmd_dma_handle); return NULL; } memset(c->err_info, 0, sizeof(*c->err_info)); INIT_LIST_HEAD(&c->list); c->busaddr = (u32) cmd_dma_handle; temp64.val = (u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; c->ErrDesc.Addr.upper = temp64.val32.upper; c->ErrDesc.Len = sizeof(*c->err_info); c->h = h; return c; } static void cmd_free(struct ctlr_info *h, struct CommandList *c) { int i; i = c - h->cmd_pool; clear_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)); h->nr_frees++; } static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) { union u64bit temp64; temp64.val32.lower = c->ErrDesc.Addr.lower; temp64.val32.upper = c->ErrDesc.Addr.upper; pci_free_consistent(h->pdev, sizeof(*c->err_info), c->err_info, (dma_addr_t) temp64.val); pci_free_consistent(h->pdev, sizeof(*c), c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); } #ifdef CONFIG_COMPAT static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) { IOCTL32_Command_struct __user *arg32 = (IOCTL32_Command_struct __user *) arg; IOCTL_Command_struct arg64; IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request)); err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info)); err |= get_user(arg64.buf_size, &arg32->buf_size); err |= get_user(cp, 
&arg32->buf); arg64.buf = compat_ptr(cp); err |= copy_to_user(p, &arg64, sizeof(arg64)); if (err) return -EFAULT; err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info)); if (err) return -EFAULT; return err; } static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, int cmd, void *arg) { BIG_IOCTL32_Command_struct __user *arg32 = (BIG_IOCTL32_Command_struct __user *) arg; BIG_IOCTL_Command_struct arg64; BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request)); err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info)); err |= get_user(arg64.buf_size, &arg32->buf_size); err |= get_user(arg64.malloc_size, &arg32->malloc_size); err |= get_user(cp, &arg32->buf); arg64.buf = compat_ptr(cp); err |= copy_to_user(p, &arg64, sizeof(arg64)); if (err) return -EFAULT; err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info)); if (err) return -EFAULT; return err; } static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) { switch (cmd) { case CCISS_GETPCIINFO: case CCISS_GETINTINFO: case CCISS_SETINTINFO: case CCISS_GETNODENAME: case CCISS_SETNODENAME: case CCISS_GETHEARTBEAT: case CCISS_GETBUSTYPES: case CCISS_GETFIRMVER: case CCISS_GETDRIVVER: case CCISS_REVALIDVOLS: case CCISS_DEREGDISK: case CCISS_REGNEWDISK: case CCISS_REGNEWD: case CCISS_RESCANDISK: case CCISS_GETLUNINFO: return hpsa_ioctl(dev, cmd, arg); case CCISS_PASSTHRU32: return hpsa_ioctl32_passthru(dev, cmd, arg); case CCISS_BIG_PASSTHRU32: return hpsa_ioctl32_big_passthru(dev, cmd, arg); default: return -ENOIOCTLCMD; } } #endif static 
/* Report PCI domain/bus/devfn and board id of the controller to userspace. */
int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}

/* Report the driver version, packed as (major << 16) | (minor << 8) | sub. */
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}

/*
 * CCISS_PASSTHRU: run a single-SG command on behalf of userspace.
 * Copies the user buffer in (for writes), issues the command, then
 * copies error info and read data back out.
 */
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	union u64bit temp64;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM; /* was -EFAULT: allocation failure is -ENOMEM */
		if (iocommand.Request.Type.Direction == XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				kfree(buff);
				return -EFAULT;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		kfree(buff);
		return -ENOMEM;
	}
	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else	{ /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
	/* use the kernel address the cmd block for tag */
	c->Header.Tag.lower = c->busaddr;

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request, sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64.val = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		c->SG[0].Addr.lower = temp64.val32.lower;
		c->SG[0].Addr.upper = temp64.val32.upper;
		c->SG[0].Len = iocommand.buf_size;
		c->SG[0].Ext = 0; /* we are not chaining*/
	}
	hpsa_scsi_do_simple_cmd_core(h, c);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		kfree(buff);
		cmd_special_free(h, c);
		return -EFAULT;
	}
	if (iocommand.Request.Type.Direction == XFER_READ &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			kfree(buff);
			cmd_special_free(h, c);
			return -EFAULT;
		}
	}
	kfree(buff);
	cmd_special_free(h, c);
	return 0;
}

/*
 * CCISS_BIG_PASSTHRU: like hpsa_passthru_ioctl() but the user buffer
 * may be split across up to MAXSGENTRIES scatter-gather segments of at
 * most ioc->malloc_size bytes each.
 */
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	union u64bit temp64;
	BYTE sg_used = 0;
	int status = 0;
	int i;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); /* cast of kmalloc removed */
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits  using all SGs */
	if 
(ioc->malloc_size > MAX_KMALLOC_SIZE) { status = -EINVAL; goto cleanup1; } if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { status = -EINVAL; goto cleanup1; } buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); if (!buff) { status = -ENOMEM; goto cleanup1; } buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); if (!buff_size) { status = -ENOMEM; goto cleanup1; } left = ioc->buf_size; data_ptr = ioc->buf; while (left) { sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; buff_size[sg_used] = sz; buff[sg_used] = kmalloc(sz, GFP_KERNEL); if (buff[sg_used] == NULL) { status = -ENOMEM; goto cleanup1; } if (ioc->Request.Type.Direction == XFER_WRITE) { if (copy_from_user(buff[sg_used], data_ptr, sz)) { status = -ENOMEM; goto cleanup1; } } else memset(buff[sg_used], 0, sz); left -= sz; data_ptr += sz; sg_used++; } c = cmd_special_alloc(h); if (c == NULL) { status = -ENOMEM; goto cleanup1; } c->cmd_type = CMD_IOCTL_PEND; c->Header.ReplyQueue = 0; c->Header.SGList = c->Header.SGTotal = sg_used; memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); c->Header.Tag.lower = c->busaddr; memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); if (ioc->buf_size > 0) { int i; for (i = 0; i < sg_used; i++) { temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i], PCI_DMA_BIDIRECTIONAL); c->SG[i].Addr.lower = temp64.val32.lower; c->SG[i].Addr.upper = temp64.val32.upper; c->SG[i].Len = buff_size[i]; /* we are not chaining */ c->SG[i].Ext = 0; } } hpsa_scsi_do_simple_cmd_core(h, c); if (sg_used) hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); /* Copy the error information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); if (copy_to_user(argp, ioc, sizeof(*ioc))) { cmd_special_free(h, c); status = -EFAULT; goto cleanup1; } if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { /* Copy the data out of the buffer we created */ BYTE __user *ptr = ioc->buf; for (i = 0; i < 
sg_used; i++) { if (copy_to_user(ptr, buff[i], buff_size[i])) { cmd_special_free(h, c); status = -EFAULT; goto cleanup1; } ptr += buff_size[i]; } } cmd_special_free(h, c); status = 0; cleanup1: if (buff) { for (i = 0; i < sg_used; i++) kfree(buff[i]); kfree(buff); } kfree(buff_size); kfree(ioc); return status; } static void check_ioctl_unit_attention(struct ctlr_info *h, struct CommandList *c) { if (c->err_info->CommandStatus == CMD_TARGET_STATUS && c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) (void) check_for_unit_attention(h, c); } /* * ioctl */ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) { struct ctlr_info *h; void __user *argp = (void __user *)arg; h = sdev_to_hba(dev); switch (cmd) { case CCISS_DEREGDISK: case CCISS_REGNEWDISK: case CCISS_REGNEWD: hpsa_scan_start(h->scsi_host); return 0; case CCISS_GETPCIINFO: return hpsa_getpciinfo_ioctl(h, argp); case CCISS_GETDRIVVER: return hpsa_getdrivver_ioctl(h, argp); case CCISS_PASSTHRU: return hpsa_passthru_ioctl(h, argp); case CCISS_BIG_PASSTHRU: return hpsa_big_passthru_ioctl(h, argp); default: return -ENOTTY; } } static int __devinit hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, u8 reset_type) { struct CommandList *c; c = cmd_alloc(h); if (!c) return -ENOMEM; fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, RAID_CTLR_LUNID, TYPE_MSG); c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ c->waiting = NULL; enqueue_cmd_and_start_io(h, c); /* Don't wait for completion, the reset won't complete. Don't free * the command either. This is the last command we will send before * re-initializing everything, so it doesn't matter and won't leak. 
*/
	return 0;
}

/*
 * Populate a CommandList for the well-known commands the driver issues
 * internally (inquiry, report luns, cache flush, TUR, device reset),
 * then DMA-map the optional data buffer.
 *
 * @page_code: non-zero selects an INQUIRY vital product data page.
 * @scsi3addr: 8-byte LUN address copied into the command header.
 * @cmd_type:  TYPE_CMD or TYPE_MSG; anything else is a driver bug.
 */
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so It's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		default:
			/* was "0x%c": %c printed the opcode as a glyph */
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
			BUG();
			return;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case  HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.Type.Type =  1; /* It is a MSG not a CMD */
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] =  cmd;
			c->Request.CDB[1] = 0x03;  /* Reset target above */
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;

		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (c->Request.Type.Direction) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}

	hpsa_map_one(h->pdev, c, buff, size, pci_dir);

	return;
}

/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
ulong page_offs = ((ulong) base) - page_base; void __iomem *page_remapped = ioremap(page_base, page_offs + size); return page_remapped ? (page_remapped + page_offs) : NULL; } /* Takes cmds off the submission queue and sends them to the hardware, * then puts them on the queue of cmds waiting for completion. */ static void start_io(struct ctlr_info *h) { struct CommandList *c; while (!list_empty(&h->reqQ)) { c = list_entry(h->reqQ.next, struct CommandList, list); /* can't do anything if fifo is full */ if ((h->access.fifo_full(h))) { dev_warn(&h->pdev->dev, "fifo full\n"); break; } /* Get the first entry from the Request Q */ removeQ(c); h->Qdepth--; /* Tell the controller execute command */ h->access.submit_command(h, c); /* Put job onto the completed Q */ addQ(&h->cmpQ, c); } } static inline unsigned long get_next_completion(struct ctlr_info *h) { return h->access.command_completed(h); } static inline bool interrupt_pending(struct ctlr_info *h) { return h->access.intr_pending(h); } static inline long interrupt_not_for_us(struct ctlr_info *h) { return (h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0); } static inline int bad_tag(struct ctlr_info *h, u32 tag_index, u32 raw_tag) { if (unlikely(tag_index >= h->nr_cmds)) { dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); return 1; } return 0; } static inline void finish_cmd(struct CommandList *c, u32 raw_tag) { removeQ(c); if (likely(c->cmd_type == CMD_SCSI)) complete_scsi_command(c); else if (c->cmd_type == CMD_IOCTL_PEND) complete(c->waiting); } static inline u32 hpsa_tag_contains_index(u32 tag) { return tag & DIRECT_LOOKUP_BIT; } static inline u32 hpsa_tag_to_index(u32 tag) { return tag >> DIRECT_LOOKUP_SHIFT; } static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) { #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) #define HPSA_SIMPLE_ERROR_BITS 0x03 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) return tag & ~HPSA_SIMPLE_ERROR_BITS; return 
tag & ~HPSA_PERF_ERROR_BITS; } /* process completion of an indexed ("direct lookup") command */ static inline u32 process_indexed_cmd(struct ctlr_info *h, u32 raw_tag) { u32 tag_index; struct CommandList *c; tag_index = hpsa_tag_to_index(raw_tag); if (bad_tag(h, tag_index, raw_tag)) return next_command(h); c = h->cmd_pool + tag_index; finish_cmd(c, raw_tag); return next_command(h); } /* process completion of a non-indexed command */ static inline u32 process_nonindexed_cmd(struct ctlr_info *h, u32 raw_tag) { u32 tag; struct CommandList *c = NULL; tag = hpsa_tag_discard_error_bits(h, raw_tag); list_for_each_entry(c, &h->cmpQ, list) { if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { finish_cmd(c, raw_tag); return next_command(h); } } bad_tag(h, h->nr_cmds + 1, raw_tag); return next_command(h); } /* Some controllers, like p400, will give us one interrupt * after a soft reset, even if we turned interrupts off. * Only need to check for this in the hpsa_xxx_discard_completions * functions. */ static int ignore_bogus_interrupt(struct ctlr_info *h) { if (likely(!reset_devices)) return 0; if (likely(h->interrupts_enabled)) return 0; dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " "(known firmware bug.) 
Ignoring.\n"); return 1; } static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id) { struct ctlr_info *h = dev_id; unsigned long flags; u32 raw_tag; if (ignore_bogus_interrupt(h)) return IRQ_NONE; if (interrupt_not_for_us(h)) return IRQ_NONE; spin_lock_irqsave(&h->lock, flags); while (interrupt_pending(h)) { raw_tag = get_next_completion(h); while (raw_tag != FIFO_EMPTY) raw_tag = next_command(h); } spin_unlock_irqrestore(&h->lock, flags); return IRQ_HANDLED; } static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id) { struct ctlr_info *h = dev_id; unsigned long flags; u32 raw_tag; if (ignore_bogus_interrupt(h)) return IRQ_NONE; spin_lock_irqsave(&h->lock, flags); raw_tag = get_next_completion(h); while (raw_tag != FIFO_EMPTY) raw_tag = next_command(h); spin_unlock_irqrestore(&h->lock, flags); return IRQ_HANDLED; } static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) { struct ctlr_info *h = dev_id; unsigned long flags; u32 raw_tag; if (interrupt_not_for_us(h)) return IRQ_NONE; spin_lock_irqsave(&h->lock, flags); while (interrupt_pending(h)) { raw_tag = get_next_completion(h); while (raw_tag != FIFO_EMPTY) { if (hpsa_tag_contains_index(raw_tag)) raw_tag = process_indexed_cmd(h, raw_tag); else raw_tag = process_nonindexed_cmd(h, raw_tag); } } spin_unlock_irqrestore(&h->lock, flags); return IRQ_HANDLED; } static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) { struct ctlr_info *h = dev_id; unsigned long flags; u32 raw_tag; spin_lock_irqsave(&h->lock, flags); raw_tag = get_next_completion(h); while (raw_tag != FIFO_EMPTY) { if (hpsa_tag_contains_index(raw_tag)) raw_tag = process_indexed_cmd(h, raw_tag); else raw_tag = process_nonindexed_cmd(h, raw_tag); } spin_unlock_irqrestore(&h->lock, flags); return IRQ_HANDLED; } /* Send a message CDB to the firmware. Careful, this only works * in simple mode, not performant mode due to the tag lookup. * We only ever use this immediately after a controller reset. 
*/ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) { struct Command { struct CommandListHeader CommandHeader; struct RequestBlock Request; struct ErrDescriptor ErrorDescriptor; }; struct Command *cmd; static const size_t cmd_sz = sizeof(*cmd) + sizeof(cmd->ErrorDescriptor); dma_addr_t paddr64; uint32_t paddr32, tag; void __iomem *vaddr; int i, err; vaddr = pci_ioremap_bar(pdev, 0); if (vaddr == NULL) return -ENOMEM; /* The Inbound Post Queue only accepts 32-bit physical addresses for the * CCISS commands, so they must be allocated from the lower 4GiB of * memory. */ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { iounmap(vaddr); return -ENOMEM; } cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); if (cmd == NULL) { iounmap(vaddr); return -ENOMEM; } /* This must fit, because of the 32-bit consistent DMA mask. Also, * although there's no guarantee, we assume that the address is at * least 4-byte aligned (most likely, it's page-aligned). 
*/ paddr32 = paddr64; cmd->CommandHeader.ReplyQueue = 0; cmd->CommandHeader.SGList = 0; cmd->CommandHeader.SGTotal = 0; cmd->CommandHeader.Tag.lower = paddr32; cmd->CommandHeader.Tag.upper = 0; memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); cmd->Request.CDBLen = 16; cmd->Request.Type.Type = TYPE_MSG; cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; cmd->Request.Type.Direction = XFER_NONE; cmd->Request.Timeout = 0; /* Don't time out */ cmd->Request.CDB[0] = opcode; cmd->Request.CDB[1] = type; memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); cmd->ErrorDescriptor.Addr.upper = 0; cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) break; msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); } iounmap(vaddr); /* we leak the DMA buffer here ... no choice since the controller could * still complete the command. */ if (i == HPSA_MSG_SEND_RETRY_LIMIT) { dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", opcode, type); return -ETIMEDOUT; } pci_free_consistent(pdev, cmd_sz, cmd, paddr64); if (tag & HPSA_ERROR_BIT) { dev_err(&pdev->dev, "controller message %02x:%02x failed\n", opcode, type); return -EIO; } dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", opcode, type); return 0; } #define hpsa_noop(p) hpsa_message(p, 3, 0) static int hpsa_controller_hard_reset(struct pci_dev *pdev, void * __iomem vaddr, u32 use_doorbell) { u16 pmcsr; int pos; if (use_doorbell) { /* For everything after the P600, the PCI power state method * of resetting the controller doesn't work, so we have this * other way using the doorbell register. 
*/ dev_info(&pdev->dev, "using doorbell to reset controller\n"); writel(use_doorbell, vaddr + SA5_DOORBELL); } else { /* Try to do it the PCI power state way */ /* Quoting from the Open CISS Specification: "The Power * Management Control/Status Register (CSR) controls the power * state of the device. The normal operating state is D0, * CSR=00h. The software off state is D3, CSR=03h. To reset * the controller, place the interface device in D3 then to D0, * this causes a secondary PCI reset which will reset the * controller." */ pos = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pos == 0) { dev_err(&pdev->dev, "hpsa_reset_controller: " "PCI PM not supported\n"); return -ENODEV; } dev_info(&pdev->dev, "using PCI PM to reset controller\n"); /* enter the D3hot power management state */ pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D3hot; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); msleep(500); /* enter the D0 power management state */ pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D0; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); /* * The P600 requires a small delay when changing states. * Otherwise we may think the board did not reset and we bail. * This for kdump only and is particular to the P600. 
*/ msleep(500); } return 0; } static __devinit void init_driver_version(char *driver_version, int len) { memset(driver_version, 0, len); strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1); } static __devinit int write_driver_ver_to_cfgtable( struct CfgTable __iomem *cfgtable) { char *driver_version; int i, size = sizeof(cfgtable->driver_version); driver_version = kmalloc(size, GFP_KERNEL); if (!driver_version) return -ENOMEM; init_driver_version(driver_version, size); for (i = 0; i < size; i++) writeb(driver_version[i], &cfgtable->driver_version[i]); kfree(driver_version); return 0; } static __devinit void read_driver_ver_from_cfgtable( struct CfgTable __iomem *cfgtable, unsigned char *driver_ver) { int i; for (i = 0; i < sizeof(cfgtable->driver_version); i++) driver_ver[i] = readb(&cfgtable->driver_version[i]); } static __devinit int controller_reset_failed( struct CfgTable __iomem *cfgtable) { char *driver_ver, *old_driver_ver; int rc, size = sizeof(cfgtable->driver_version); old_driver_ver = kmalloc(2 * size, GFP_KERNEL); if (!old_driver_ver) return -ENOMEM; driver_ver = old_driver_ver + size; /* After a reset, the 32 bytes of "driver version" in the cfgtable * should have been changed, otherwise we know the reset failed. */ init_driver_version(old_driver_ver, size); read_driver_ver_from_cfgtable(cfgtable, driver_ver); rc = !memcmp(driver_ver, old_driver_ver, size); kfree(old_driver_ver); return rc; } /* This does a hard reset of the controller using PCI power management * states or the using the doorbell register. 
*/
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u32 board_id;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */
	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0 || !ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Not resetting device.\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	/* Turn the board off.  This is so that later pci_restore_state()
	 * won't turn the board on before the rest of config space is ready.
	 */
	pci_disable_device(pdev);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable; /* was unmap_vaddr: leaked cfgtable mapping */

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev, "Controller claims that "
				"'Bit 2 doorbell reset' is "
				"supported, but not 'bit 5 doorbell reset'. "
				"Firmware update is recommended.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		goto unmap_cfgtable;
	}
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	/* Wait for board to become not ready, then ready. */
	dev_info(&pdev->dev, "Waiting for board to reset.\n");
	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"failed waiting for board to reset."
			" Will try soft reset.\n");
		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
		goto unmap_cfgtable;
	}
	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"failed waiting for board to become ready "
			"after hard reset\n");
		goto unmap_cfgtable;
	}

	/* Must check the same cfgtable mapping that
	 * write_driver_ver_to_cfgtable() wrote through; was passing the
	 * raw BAR base (vaddr), which reads the wrong memory.
	 */
	rc = controller_reset_failed(cfgtable);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}

/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
*/ static void print_cfg_table(struct device *dev, struct CfgTable *tb) { #ifdef HPSA_DEBUG int i; char temp_name[17]; dev_info(dev, "Controller Configuration information\n"); dev_info(dev, "------------------------------------\n"); for (i = 0; i < 4; i++) temp_name[i] = readb(&(tb->Signature[i])); temp_name[4] = '\0'; dev_info(dev, " Signature = %s\n", temp_name); dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); dev_info(dev, " Transport methods supported = 0x%x\n", readl(&(tb->TransportSupport))); dev_info(dev, " Transport methods active = 0x%x\n", readl(&(tb->TransportActive))); dev_info(dev, " Requested transport Method = 0x%x\n", readl(&(tb->HostWrite.TransportRequest))); dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", readl(&(tb->HostWrite.CoalIntDelay))); dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", readl(&(tb->HostWrite.CoalIntCount))); dev_info(dev, " Max outstanding commands = 0x%d\n", readl(&(tb->CmdsOutMax))); dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); for (i = 0; i < 16; i++) temp_name[i] = readb(&(tb->ServerName[i])); temp_name[16] = '\0'; dev_info(dev, " Server Name = %s\n", temp_name); dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat))); #endif /* HPSA_DEBUG */ } static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) { int i, offset, mem_type, bar_type; if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? 
*/ return 0; offset = 0; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) offset += 4; else { mem_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_MEM_TYPE_MASK; switch (mem_type) { case PCI_BASE_ADDRESS_MEM_TYPE_32: case PCI_BASE_ADDRESS_MEM_TYPE_1M: offset += 4; /* 32 bit */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64: offset += 8; break; default: /* reserved in PCI 2.2 */ dev_warn(&pdev->dev, "base address is invalid\n"); return -1; break; } } if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) return i + 1; } return -1; } /* If MSI/MSI-X is supported by the kernel we will try to enable it on * controllers that are capable. If not, we use IO-APIC mode. */ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h) { #ifdef CONFIG_PCI_MSI int err; struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1}, {0, 2}, {0, 3} }; /* Some boards advertise MSI but don't really support it */ if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) goto default_int_mode; if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { dev_info(&h->pdev->dev, "MSIX\n"); err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4); if (!err) { h->intr[0] = hpsa_msix_entries[0].vector; h->intr[1] = hpsa_msix_entries[1].vector; h->intr[2] = hpsa_msix_entries[2].vector; h->intr[3] = hpsa_msix_entries[3].vector; h->msix_vector = 1; return; } if (err > 0) { dev_warn(&h->pdev->dev, "only %d MSI-X vectors " "available\n", err); goto default_int_mode; } else { dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); goto default_int_mode; } } if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { dev_info(&h->pdev->dev, "MSI\n"); if (!pci_enable_msi(h->pdev)) h->msi_vector = 1; else dev_warn(&h->pdev->dev, "MSI init failed\n"); } default_int_mode: #endif /* CONFIG_PCI_MSI */ /* if we get here we're going to use the default interrupt 
mode */ h->intr[h->intr_mode] = h->pdev->irq; } static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) { int i; u32 subsystem_vendor_id, subsystem_device_id; subsystem_vendor_id = pdev->subsystem_vendor; subsystem_device_id = pdev->subsystem_device; *board_id = ((subsystem_device_id << 16) & 0xffff0000) | subsystem_vendor_id; for (i = 0; i < ARRAY_SIZE(products); i++) if (*board_id == products[i].board_id) return i; if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || !hpsa_allow_any) { dev_warn(&pdev->dev, "unrecognized board ID: " "0x%08x, ignoring.\n", *board_id); return -ENODEV; } return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ } static inline bool hpsa_board_disabled(struct pci_dev *pdev) { u16 command; (void) pci_read_config_word(pdev, PCI_COMMAND, &command); return ((command & PCI_COMMAND_MEMORY) == 0); } static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar) { int i; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { /* addressing mode bits already removed */ *memory_bar = pci_resource_start(pdev, i); dev_dbg(&pdev->dev, "memory BAR = %lx\n", *memory_bar); return 0; } dev_warn(&pdev->dev, "no memory BAR found\n"); return -ENODEV; } static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, int wait_for_ready) { int i, iterations; u32 scratchpad; if (wait_for_ready) iterations = HPSA_BOARD_READY_ITERATIONS; else iterations = HPSA_BOARD_NOT_READY_ITERATIONS; for (i = 0; i < iterations; i++) { scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); if (wait_for_ready) { if (scratchpad == HPSA_FIRMWARE_READY) return 0; } else { if (scratchpad != HPSA_FIRMWARE_READY) return 0; } msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); } dev_warn(&pdev->dev, "board not ready, timed out.\n"); return -ENODEV; } static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev, void 
__iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, u64 *cfg_offset) { *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); *cfg_base_addr &= (u32) 0x0000ffff; *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); if (*cfg_base_addr_index == -1) { dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); return -ENODEV; } return 0; } static int __devinit hpsa_find_cfgtables(struct ctlr_info *h) { u64 cfg_offset; u32 cfg_base_addr; u64 cfg_base_addr_index; u32 trans_offset; int rc; rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, &cfg_base_addr_index, &cfg_offset); if (rc) return rc; h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); if (!h->cfgtable) return -ENOMEM; rc = write_driver_ver_to_cfgtable(h->cfgtable); if (rc) return rc; /* Find performant mode table. */ trans_offset = readl(&h->cfgtable->TransMethodOffset); h->transtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index)+cfg_offset+trans_offset, sizeof(*h->transtable)); if (!h->transtable) return -ENOMEM; return 0; } static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) { h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); /* Limit commands in memory limited kdump scenario. */ if (reset_devices && h->max_commands > 32) h->max_commands = 32; if (h->max_commands < 16) { dev_warn(&h->pdev->dev, "Controller reports " "max supported commands of %d, an obvious lie. " "Using 16. Ensure that firmware is up to date.\n", h->max_commands); h->max_commands = 16; } } /* Interrogate the hardware for some limits: * max commands, max SG elements without chaining, and with chaining, * SG chain block size, etc. 
*/ static void __devinit hpsa_find_board_params(struct ctlr_info *h) { hpsa_get_max_perf_mode_cmds(h); h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); /* * Limit in-command s/g elements to 32 save dma'able memory. * Howvever spec says if 0, use 31 */ h->max_cmd_sg_entries = 31; if (h->maxsgentries > 512) { h->max_cmd_sg_entries = 32; h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; h->maxsgentries--; /* save one for chain pointer */ } else { h->maxsgentries = 31; /* default to traditional values */ h->chainsize = 0; } } static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) { if ((readb(&h->cfgtable->Signature[0]) != 'C') || (readb(&h->cfgtable->Signature[1]) != 'I') || (readb(&h->cfgtable->Signature[2]) != 'S') || (readb(&h->cfgtable->Signature[3]) != 'S')) { dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); return false; } return true; } /* Need to enable prefetch in the SCSI core for 6400 in x86 */ static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h) { #ifdef CONFIG_X86 u32 prefetch; prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); prefetch |= 0x100; writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); #endif } /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result * in a prefetch beyond physical memory. */ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) { u32 dma_prefetch; if (h->board_id != 0x3225103C) return; dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); dma_prefetch |= 0x8000; writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); } static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h) { int i; u32 doorbell_value; unsigned long flags; /* under certain very rare conditions, this can take awhile. * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right * as we enter this code.) 
*/ for (i = 0; i < MAX_CONFIG_WAIT; i++) { spin_lock_irqsave(&h->lock, flags); doorbell_value = readl(h->vaddr + SA5_DOORBELL); spin_unlock_irqrestore(&h->lock, flags); if (!(doorbell_value & CFGTBL_ChangeReq)) break; /* delay and try again */ usleep_range(10000, 20000); } } static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h) { u32 trans_support; trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & SIMPLE_MODE)) return -ENOTSUPP; h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); /* Update the field, and then ring the doorbell */ writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); hpsa_wait_for_mode_change_ack(h); print_cfg_table(&h->pdev->dev, h->cfgtable); if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); return -ENODEV; } h->transMethod = CFGTBL_Trans_Simple; return 0; } static int __devinit hpsa_pci_init(struct ctlr_info *h) { int prod_index, err; prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); if (prod_index < 0) return -ENODEV; h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); if (hpsa_board_disabled(h->pdev)) { dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); return -ENODEV; } pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); err = pci_enable_device(h->pdev); if (err) { dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); return err; } err = pci_request_regions(h->pdev, "hpsa"); if (err) { dev_err(&h->pdev->dev, "cannot obtain PCI resources, aborting\n"); return err; } hpsa_interrupt_mode(h); err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) goto err_out_free_res; h->vaddr = remap_pci_mem(h->paddr, 0x250); if (!h->vaddr) { err = -ENOMEM; goto err_out_free_res; } err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); 
if (err) goto err_out_free_res; err = hpsa_find_cfgtables(h); if (err) goto err_out_free_res; hpsa_find_board_params(h); if (!hpsa_CISS_signature_present(h)) { err = -ENODEV; goto err_out_free_res; } hpsa_enable_scsi_prefetch(h); hpsa_p600_dma_prefetch_quirk(h); err = hpsa_enter_simple_mode(h); if (err) goto err_out_free_res; return 0; err_out_free_res: if (h->transtable) iounmap(h->transtable); if (h->cfgtable) iounmap(h->cfgtable); if (h->vaddr) iounmap(h->vaddr); /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ pci_release_regions(h->pdev); return err; } static void __devinit hpsa_hba_inquiry(struct ctlr_info *h) { int rc; #define HBA_INQUIRY_BYTE_COUNT 64 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); if (!h->hba_inquiry_data) return; rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); if (rc != 0) { kfree(h->hba_inquiry_data); h->hba_inquiry_data = NULL; } } static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev) { int rc, i; if (!reset_devices) return 0; /* Reset the controller with a PCI power-cycle or via doorbell */ rc = hpsa_kdump_hard_reset_controller(pdev); /* -ENOTSUPP here means we cannot reset the controller * but it's already (and still) up and running in * "performant mode". Or, it might be 640x, which can't reset * due to concerns about shared bbwc between 6402/6404 pair. */ if (rc == -ENOTSUPP) return rc; /* just try to do the kdump anyhow. */ if (rc) return -ENODEV; /* Now try to get the controller to respond to a no-op */ dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { if (hpsa_noop(pdev) == 0) break; else dev_warn(&pdev->dev, "no-op failed%s\n", (i < 11 ? 
"; re-trying" : "")); } return 0; } static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h) { h->cmd_pool_bits = kzalloc( DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); h->cmd_pool = pci_alloc_consistent(h->pdev, h->nr_cmds * sizeof(*h->cmd_pool), &(h->cmd_pool_dhandle)); h->errinfo_pool = pci_alloc_consistent(h->pdev, h->nr_cmds * sizeof(*h->errinfo_pool), &(h->errinfo_pool_dhandle)); if ((h->cmd_pool_bits == NULL) || (h->cmd_pool == NULL) || (h->errinfo_pool == NULL)) { dev_err(&h->pdev->dev, "out of memory in %s", __func__); return -ENOMEM; } return 0; } static void hpsa_free_cmd_pool(struct ctlr_info *h) { kfree(h->cmd_pool_bits); if (h->cmd_pool) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(struct CommandList), h->cmd_pool, h->cmd_pool_dhandle); if (h->errinfo_pool) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(struct ErrorInfo), h->errinfo_pool, h->errinfo_pool_dhandle); } static int hpsa_request_irq(struct ctlr_info *h, irqreturn_t (*msixhandler)(int, void *), irqreturn_t (*intxhandler)(int, void *)) { int rc; if (h->msix_vector || h->msi_vector) rc = request_irq(h->intr[h->intr_mode], msixhandler, 0, h->devname, h); else rc = request_irq(h->intr[h->intr_mode], intxhandler, IRQF_SHARED, h->devname, h); if (rc) { dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", h->intr[h->intr_mode], h->devname); return -ENODEV; } return 0; } static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h) { if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER)) { dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); return -EIO; } dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); return -1; } dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { dev_warn(&h->pdev->dev, "Board failed 
to become ready " "after soft reset.\n"); return -1; } return 0; } static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) { free_irq(h->intr[h->intr_mode], h); #ifdef CONFIG_PCI_MSI if (h->msix_vector) pci_disable_msix(h->pdev); else if (h->msi_vector) pci_disable_msi(h->pdev); #endif /* CONFIG_PCI_MSI */ hpsa_free_sg_chain_blocks(h); hpsa_free_cmd_pool(h); kfree(h->blockFetchTable); pci_free_consistent(h->pdev, h->reply_pool_size, h->reply_pool, h->reply_pool_dhandle); if (h->vaddr) iounmap(h->vaddr); if (h->transtable) iounmap(h->transtable); if (h->cfgtable) iounmap(h->cfgtable); pci_release_regions(h->pdev); kfree(h); } static int __devinit hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int dac, rc; struct ctlr_info *h; int try_soft_reset = 0; unsigned long flags; if (number_of_controllers == 0) printk(KERN_INFO DRIVER_NAME "\n"); rc = hpsa_init_reset_devices(pdev); if (rc) { if (rc != -ENOTSUPP) return rc; /* If the reset fails in a particular way (it has no way to do * a proper hard reset, so returns -ENOTSUPP) we can try to do * a soft reset once we get the controller configured up to the * point that it can accept a command. */ try_soft_reset = 1; rc = 0; } reinit_after_soft_reset: /* Command structures must be aligned on a 32-byte boundary because * the 5 lower bits of the address are used by the hardware. and by * the driver. See comments in hpsa.h for more info. */ #define COMMANDLIST_ALIGNMENT 32 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); h = kzalloc(sizeof(*h), GFP_KERNEL); if (!h) return -ENOMEM; h->pdev = pdev; h->busy_initializing = 1; h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT; INIT_LIST_HEAD(&h->cmpQ); INIT_LIST_HEAD(&h->reqQ); spin_lock_init(&h->lock); spin_lock_init(&h->scan_lock); rc = hpsa_pci_init(h); if (rc != 0) goto clean1; sprintf(h->devname, "hpsa%d", number_of_controllers); h->ctlr = number_of_controllers; number_of_controllers++; /* configure PCI DMA stuff */ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc == 0) { dac = 1; } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc == 0) { dac = 0; } else { dev_err(&pdev->dev, "no suitable DMA available\n"); goto clean1; } } /* make sure the board interrupts are off */ h->access.set_intr_mask(h, HPSA_INTR_OFF); if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) goto clean2; dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", h->devname, pdev->device, h->intr[h->intr_mode], dac ? "" : " not"); if (hpsa_allocate_cmd_pool(h)) goto clean4; if (hpsa_allocate_sg_chain_blocks(h)) goto clean4; init_waitqueue_head(&h->scan_wait_queue); h->scan_finished = 1; /* no scan currently in progress */ pci_set_drvdata(pdev, h); h->ndevices = 0; h->scsi_host = NULL; spin_lock_init(&h->devlock); hpsa_put_ctlr_into_performant_mode(h); /* At this point, the controller is ready to take commands. * Now, if reset_devices and the hard reset didn't work, try * the soft reset and see if that works. */ if (try_soft_reset) { /* This is kind of gross. We may or may not get a completion * from the soft reset command, and if we do, then the value * from the fifo may or may not be valid. So, we wait 10 secs * after the reset throwing away any completions we get during * that time. Unregister the interrupt handler and register * fake ones to scoop up any residual completions. 
*/ spin_lock_irqsave(&h->lock, flags); h->access.set_intr_mask(h, HPSA_INTR_OFF); spin_unlock_irqrestore(&h->lock, flags); free_irq(h->intr[h->intr_mode], h); rc = hpsa_request_irq(h, hpsa_msix_discard_completions, hpsa_intx_discard_completions); if (rc) { dev_warn(&h->pdev->dev, "Failed to request_irq after " "soft reset.\n"); goto clean4; } rc = hpsa_kdump_soft_reset(h); if (rc) /* Neither hard nor soft reset worked, we're hosed. */ goto clean4; dev_info(&h->pdev->dev, "Board READY.\n"); dev_info(&h->pdev->dev, "Waiting for stale completions to drain.\n"); h->access.set_intr_mask(h, HPSA_INTR_ON); msleep(10000); h->access.set_intr_mask(h, HPSA_INTR_OFF); rc = controller_reset_failed(h->cfgtable); if (rc) dev_info(&h->pdev->dev, "Soft reset appears to have failed.\n"); /* since the controller's reset, we have to go back and re-init * everything. Easiest to just forget what we've done and do it * all over again. */ hpsa_undo_allocations_after_kdump_soft_reset(h); try_soft_reset = 0; if (rc) /* don't go to clean4, we already unallocated */ return -ENODEV; goto reinit_after_soft_reset; } /* Turn the interrupts on so we can service requests */ h->access.set_intr_mask(h, HPSA_INTR_ON); hpsa_hba_inquiry(h); hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ h->busy_initializing = 0; return 1; clean4: hpsa_free_sg_chain_blocks(h); hpsa_free_cmd_pool(h); free_irq(h->intr[h->intr_mode], h); clean2: clean1: h->busy_initializing = 0; kfree(h); return rc; } static void hpsa_flush_cache(struct ctlr_info *h) { char *flush_buf; struct CommandList *c; flush_buf = kzalloc(4, GFP_KERNEL); if (!flush_buf) return; c = cmd_special_alloc(h); if (!c) { dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); goto out_of_memory; } fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, RAID_CTLR_LUNID, TYPE_CMD); hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); if (c->err_info->CommandStatus != 0) dev_warn(&h->pdev->dev, "error flushing cache on controller\n"); 
cmd_special_free(h, c); out_of_memory: kfree(flush_buf); } static void hpsa_shutdown(struct pci_dev *pdev) { struct ctlr_info *h; h = pci_get_drvdata(pdev); /* Turn board interrupts off and send the flush cache command * sendcmd will turn off interrupt, and send the flush... * To write all data in the battery backed cache to disks */ hpsa_flush_cache(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); free_irq(h->intr[h->intr_mode], h); #ifdef CONFIG_PCI_MSI if (h->msix_vector) pci_disable_msix(h->pdev); else if (h->msi_vector) pci_disable_msi(h->pdev); #endif /* CONFIG_PCI_MSI */ } static void __devexit hpsa_remove_one(struct pci_dev *pdev) { struct ctlr_info *h; if (pci_get_drvdata(pdev) == NULL) { dev_err(&pdev->dev, "unable to remove device \n"); return; } h = pci_get_drvdata(pdev); hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ hpsa_shutdown(pdev); iounmap(h->vaddr); iounmap(h->transtable); iounmap(h->cfgtable); hpsa_free_sg_chain_blocks(h); pci_free_consistent(h->pdev, h->nr_cmds * sizeof(struct CommandList), h->cmd_pool, h->cmd_pool_dhandle); pci_free_consistent(h->pdev, h->nr_cmds * sizeof(struct ErrorInfo), h->errinfo_pool, h->errinfo_pool_dhandle); pci_free_consistent(h->pdev, h->reply_pool_size, h->reply_pool, h->reply_pool_dhandle); kfree(h->cmd_pool_bits); kfree(h->blockFetchTable); kfree(h->hba_inquiry_data); /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); kfree(h); } static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, __attribute__((unused)) pm_message_t state) { return -ENOSYS; } static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) { return -ENOSYS; } static struct pci_driver hpsa_pci_driver = { .name = "hpsa", .probe = hpsa_init_one, .remove = __devexit_p(hpsa_remove_one), .id_table = hpsa_pci_device_id, /* id_table */ .shutdown = hpsa_shutdown, .suspend = 
hpsa_suspend, .resume = hpsa_resume, }; /* Fill in bucket_map[], given nsgs (the max number of * scatter gather elements supported) and bucket[], * which is an array of 8 integers. The bucket[] array * contains 8 different DMA transfer sizes (in 16 * byte increments) which the controller uses to fetch * commands. This function fills in bucket_map[], which * maps a given number of scatter gather elements to one of * the 8 DMA transfer sizes. The point of it is to allow the * controller to only do as much DMA as needed to fetch the * command, with the DMA transfer size encoded in the lower * bits of the command address. */ static void calc_bucket_map(int bucket[], int num_buckets, int nsgs, int *bucket_map) { int i, j, b, size; /* even a command with 0 SGs requires 4 blocks */ #define MINIMUM_TRANSFER_BLOCKS 4 #define NUM_BUCKETS 8 /* Note, bucket_map must have nsgs+1 entries. */ for (i = 0; i <= nsgs; i++) { /* Compute size of a command with i SG entries */ size = i + MINIMUM_TRANSFER_BLOCKS; b = num_buckets; /* Assume the biggest bucket */ /* Find the bucket that is just big enough */ for (j = 0; j < 8; j++) { if (bucket[j] >= size) { b = j; break; } } /* for a command with i SG entries, use bucket b. */ bucket_map[i] = b; } } static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) { int i; unsigned long register_value; /* This is a bit complicated. There are 8 registers on * the controller which we write to to tell it 8 different * sizes of commands which there may be. It's a way of * reducing the DMA done to fetch each command. Encoded into * each command's tag are 3 bits which communicate to the controller * which of the eight sizes that command fits within. The size of * each command depends on how many scatter gather entries there are. * Each SG entry requires 16 bytes. The eight registers are programmed * with the number of 16-byte blocks a command of that size requires. 
* The smallest command possible requires 5 such 16 byte blocks. * the largest command possible requires MAXSGENTRIES + 4 16-byte * blocks. Note, this only extends to the SG entries contained * within the command block, and does not extend to chained blocks * of SG elements. bft[] contains the eight values we write to * the registers. They are not evenly distributed, but have more * sizes for small commands, and fewer sizes for larger commands. */ int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; BUILD_BUG_ON(28 > MAXSGENTRIES + 4); /* 5 = 1 s/g entry or 4k * 6 = 2 s/g entry or 8k * 8 = 4 s/g entry or 16k * 10 = 6 s/g entry or 24k */ h->reply_pool_wraparound = 1; /* spec: init to 1 */ /* Controller spec: zero out this buffer. */ memset(h->reply_pool, 0, h->reply_pool_size); h->reply_pool_head = h->reply_pool; bft[7] = h->max_sg_entries + 4; calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable); for (i = 0; i < 8; i++) writel(bft[i], &h->transtable->BlockFetch[i]); /* size of controller ring buffer */ writel(h->max_commands, &h->transtable->RepQSize); writel(1, &h->transtable->RepQCount); writel(0, &h->transtable->RepQCtrAddrLow32); writel(0, &h->transtable->RepQCtrAddrHigh32); writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); writel(0, &h->transtable->RepQAddr0High32); writel(CFGTBL_Trans_Performant | use_short_tags, &(h->cfgtable->HostWrite.TransportRequest)); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); hpsa_wait_for_mode_change_ack(h); register_value = readl(&(h->cfgtable->TransportActive)); if (!(register_value & CFGTBL_Trans_Performant)) { dev_warn(&h->pdev->dev, "unable to get board into" " performant mode\n"); return; } /* Change the access methods to the performant access methods */ h->access = SA5_performant_access; h->transMethod = CFGTBL_Trans_Performant; } static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) { u32 trans_support; if (hpsa_simple_mode) return; trans_support = 
readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & PERFORMANT_MODE)) return; hpsa_get_max_perf_mode_cmds(h); h->max_sg_entries = 32; /* Performant mode ring buffer and supporting data structures */ h->reply_pool_size = h->max_commands * sizeof(u64); h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, &(h->reply_pool_dhandle)); /* Need a block fetch table for performant mode */ h->blockFetchTable = kmalloc(((h->max_sg_entries+1) * sizeof(u32)), GFP_KERNEL); if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) goto clean_up; hpsa_enter_performant_mode(h, trans_support & CFGTBL_Trans_use_short_tags); return; clean_up: if (h->reply_pool) pci_free_consistent(h->pdev, h->reply_pool_size, h->reply_pool, h->reply_pool_dhandle); kfree(h->blockFetchTable); } /* * This is it. Register the PCI driver information for the cards we control * the OS will call our registered routines when it finds one of our cards. */ static int __init hpsa_init(void) { return pci_register_driver(&hpsa_pci_driver); } static void __exit hpsa_cleanup(void) { pci_unregister_driver(&hpsa_pci_driver); } module_init(hpsa_init); module_exit(hpsa_cleanup);
gpl-2.0
Kretol/SkyFire_5xx
dep/acelite/ace/WFMO_Reactor.cpp
263
92257
// $Id: WFMO_Reactor.cpp 95368 2011-12-19 13:38:49Z mcorino $ #include "ace/WFMO_Reactor.h" #if defined (ACE_WIN32) #include "ace/Handle_Set.h" #include "ace/Timer_Heap.h" #include "ace/Thread.h" #include "ace/OS_NS_errno.h" #include "ace/Null_Condition.h" #if !defined (__ACE_INLINE__) #include "ace/WFMO_Reactor.inl" #endif /* __ACE_INLINE__ */ #include "ace/Auto_Ptr.h" ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_WFMO_Reactor_Handler_Repository::ACE_WFMO_Reactor_Handler_Repository (ACE_WFMO_Reactor &wfmo_reactor) : wfmo_reactor_ (wfmo_reactor) { } int ACE_WFMO_Reactor_Handler_Repository::open (size_t size) { if (size > MAXIMUM_WAIT_OBJECTS) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%d exceeds MAXIMUM_WAIT_OBJECTS (%d)\n"), size, MAXIMUM_WAIT_OBJECTS), -1); // Dynamic allocation ACE_NEW_RETURN (this->current_handles_, ACE_HANDLE[size], -1); ACE_NEW_RETURN (this->current_info_, Current_Info[size], -1); ACE_NEW_RETURN (this->current_suspended_info_, Suspended_Info[size], -1); ACE_NEW_RETURN (this->to_be_added_info_, To_Be_Added_Info[size], -1); // Initialization this->max_size_ = size; this->max_handlep1_ = 0; this->suspended_handles_ = 0; this->handles_to_be_added_ = 0; this->handles_to_be_deleted_ = 0; this->handles_to_be_suspended_ = 0; this->handles_to_be_resumed_ = 0; for (size_t i = 0; i < size; ++i) this->current_handles_[i] = ACE_INVALID_HANDLE; return 0; } ACE_WFMO_Reactor_Handler_Repository::~ACE_WFMO_Reactor_Handler_Repository (void) { // Free up dynamically allocated space delete [] this->current_handles_; delete [] this->current_info_; delete [] this->current_suspended_info_; delete [] this->to_be_added_info_; } ACE_Reactor_Mask ACE_WFMO_Reactor_Handler_Repository::bit_ops (long &existing_masks, ACE_Reactor_Mask change_masks, int operation) { // Find the old reactor masks. This automatically does the work of // the GET_MASK operation. 
ACE_Reactor_Mask old_masks = ACE_Event_Handler::NULL_MASK; if (ACE_BIT_ENABLED (existing_masks, FD_READ) || ACE_BIT_ENABLED (existing_masks, FD_CLOSE)) ACE_SET_BITS (old_masks, ACE_Event_Handler::READ_MASK); if (ACE_BIT_ENABLED (existing_masks, FD_WRITE)) ACE_SET_BITS (old_masks, ACE_Event_Handler::WRITE_MASK); if (ACE_BIT_ENABLED (existing_masks, FD_OOB)) ACE_SET_BITS (old_masks, ACE_Event_Handler::EXCEPT_MASK); if (ACE_BIT_ENABLED (existing_masks, FD_ACCEPT)) ACE_SET_BITS (old_masks, ACE_Event_Handler::ACCEPT_MASK); if (ACE_BIT_ENABLED (existing_masks, FD_CONNECT)) ACE_SET_BITS (old_masks, ACE_Event_Handler::CONNECT_MASK); if (ACE_BIT_ENABLED (existing_masks, FD_QOS)) ACE_SET_BITS (old_masks, ACE_Event_Handler::QOS_MASK); if (ACE_BIT_ENABLED (existing_masks, FD_GROUP_QOS)) ACE_SET_BITS (old_masks, ACE_Event_Handler::GROUP_QOS_MASK); switch (operation) { case ACE_Reactor::CLR_MASK: // For the CLR_MASK operation, clear only the specific masks. if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::READ_MASK)) { ACE_CLR_BITS (existing_masks, FD_READ); ACE_CLR_BITS (existing_masks, FD_CLOSE); } if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::WRITE_MASK)) ACE_CLR_BITS (existing_masks, FD_WRITE); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::EXCEPT_MASK)) ACE_CLR_BITS (existing_masks, FD_OOB); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::ACCEPT_MASK)) ACE_CLR_BITS (existing_masks, FD_ACCEPT); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::CONNECT_MASK)) ACE_CLR_BITS (existing_masks, FD_CONNECT); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::QOS_MASK)) ACE_CLR_BITS (existing_masks, FD_QOS); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::GROUP_QOS_MASK)) ACE_CLR_BITS (existing_masks, FD_GROUP_QOS); break; case ACE_Reactor::SET_MASK: // If the operation is a set, first reset any existing masks existing_masks = 0; /* FALLTHRU */ case ACE_Reactor::ADD_MASK: // For the ADD_MASK and the SET_MASK operation, add only the // specific 
masks. if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::READ_MASK)) { ACE_SET_BITS (existing_masks, FD_READ); ACE_SET_BITS (existing_masks, FD_CLOSE); } if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::WRITE_MASK)) ACE_SET_BITS (existing_masks, FD_WRITE); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::EXCEPT_MASK)) ACE_SET_BITS (existing_masks, FD_OOB); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::ACCEPT_MASK)) ACE_SET_BITS (existing_masks, FD_ACCEPT); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::CONNECT_MASK)) ACE_SET_BITS (existing_masks, FD_CONNECT); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::QOS_MASK)) ACE_SET_BITS (existing_masks, FD_QOS); if (ACE_BIT_ENABLED (change_masks, ACE_Event_Handler::GROUP_QOS_MASK)) ACE_SET_BITS (existing_masks, FD_GROUP_QOS); break; case ACE_Reactor::GET_MASK: // The work for this operation is done in all cases at the // begining of the function. ACE_UNUSED_ARG (change_masks); break; } return old_masks; } int ACE_WFMO_Reactor_Handler_Repository::unbind_i (ACE_HANDLE handle, ACE_Reactor_Mask mask, bool &changes_required) { int error = 0; // Remember this value; only if it changes do we need to wakeup // the other threads size_t const original_handle_count = this->handles_to_be_deleted_; size_t i; // Go through all the handles looking for <handle>. Even if we find // it, we continue through the rest of the list since <handle> could // appear multiple times. All handles are checked. 
// NOTE(review): tail of a removal method whose head is above this
// chunk; <i>, <error>, <handle>, <mask>, <original_handle_count> and
// <changes_required> are locals/parameters of that enclosing method.

  // First check the current entries
  for (i = 0; i < this->max_handlep1_ && error == 0; ++i)
    // Since the handle can either be the event or the I/O handle,
    // we have to check both
    if ((this->current_handles_[i] == handle
         || this->current_info_[i].io_handle_ == handle) &&
        // Make sure that it is not already marked for deleted
        !this->current_info_[i].delete_entry_)
      {
        if (this->remove_handler_i (i, mask) == -1)
          error = 1;
      }

  // Then check the suspended entries
  for (i = 0; i < this->suspended_handles_ && error == 0; ++i)
    // Since the handle can either be the event or the I/O handle, we
    // have to check both
    if ((this->current_suspended_info_[i].io_handle_ == handle
         || this->current_suspended_info_[i].event_handle_ == handle) &&
        // Make sure that it is not already marked for deleted
        !this->current_suspended_info_[i].delete_entry_)
      {
        if (this->remove_suspended_handler_i (i, mask) == -1)
          error = 1;
      }

  // Then check the to_be_added entries
  for (i = 0; i < this->handles_to_be_added_ && error == 0; ++i)
    // Since the handle can either be the event or the I/O handle,
    // we have to check both
    if ((this->to_be_added_info_[i].io_handle_ == handle
         || this->to_be_added_info_[i].event_handle_ == handle) &&
        // Make sure that it is not already marked for deleted
        !this->to_be_added_info_[i].delete_entry_)
      {
        if (this->remove_to_be_added_handler_i (i, mask) == -1)
          error = 1;
      }

  // Only if the number of handlers to be deleted changes do we need
  // to wakeup the other threads
  if (original_handle_count < this->handles_to_be_deleted_)
    changes_required = true;

  return error ? -1 : 0;
}

// Clear <to_be_removed_masks> from the current (active) entry in
// <slot>.  If no network events remain for an I/O entry (or for any
// non-I/O entry) the slot is scheduled for deletion; otherwise
// handle_close is invoked for just the removed masks.
// NOTE(review): assumes the repository lock is held by the caller --
// confirm against the public remove path.
int
ACE_WFMO_Reactor_Handler_Repository::remove_handler_i (size_t slot,
                                                       ACE_Reactor_Mask to_be_removed_masks)
{
  // I/O entries
  if (this->current_info_[slot].io_entry_)
    {
      // See if there are other events that the <Event_Handler> is
      // interested in
      this->bit_ops (this->current_info_[slot].network_events_,
                     to_be_removed_masks,
                     ACE_Reactor::CLR_MASK);

      // Disassociate/Reassociate the event from/with the I/O handle.
      // This will depend on the value of remaining set of network
      // events that the <event_handler> is interested in.  I don't
      // think we can do anything about errors here, so I will not
      // check this.
      ::WSAEventSelect ((SOCKET) this->current_info_[slot].io_handle_,
                        this->current_handles_[slot],
                        this->current_info_[slot].network_events_);
    }
  // Normal event entries.
  else if (ACE_BIT_ENABLED (to_be_removed_masks, ACE_Event_Handler::DONT_CALL))
    // Preserve DONT_CALL
    to_be_removed_masks = ACE_Event_Handler::DONT_CALL;
  else
    // Make sure that the <to_be_removed_masks> is the NULL_MASK
    to_be_removed_masks = ACE_Event_Handler::NULL_MASK;

  // If this event was marked for suspension, undo the suspension flag
  // and reduce the to be suspended count.
  if (this->current_info_[slot].suspend_entry_)
    {
      // Undo suspension
      this->current_info_[slot].suspend_entry_ = false;
      // Decrement the handle count
      --this->handles_to_be_suspended_;
    }

  // If there are no more events that the <Event_Handler> is
  // interested in, or this is a non-I/O entry, schedule the
  // <Event_Handler> for removal
  if (this->current_info_[slot].network_events_ == 0)
    {
      // Mark to be deleted
      this->current_info_[slot].delete_entry_ = true;
      // Remember the mask
      this->current_info_[slot].close_masks_ = to_be_removed_masks;
      // Increment the handle count
      ++this->handles_to_be_deleted_;
    }
  // Since it is not a complete removal, we'll call handle_close
  // for all the masks that were removed.  This does not change
  // the internal state of the reactor.
  //
  // Note: this condition only applies to I/O entries
  else if (ACE_BIT_ENABLED (to_be_removed_masks, ACE_Event_Handler::DONT_CALL) == 0)
    {
      ACE_HANDLE handle = this->current_info_[slot].io_handle_;
      this->current_info_[slot].event_handler_->handle_close (handle,
                                                              to_be_removed_masks);
    }

  return 0;
}

// Same as remove_handler_i but for an entry in the suspended set:
// clears masks, undoes any pending resumption, and schedules the slot
// for deletion when no network events remain.
int
ACE_WFMO_Reactor_Handler_Repository::remove_suspended_handler_i (size_t slot,
                                                                 ACE_Reactor_Mask to_be_removed_masks)
{
  // I/O entries
  if (this->current_suspended_info_[slot].io_entry_)
    {
      // See if there are other events that the <Event_Handler> is
      // interested in
      this->bit_ops (this->current_suspended_info_[slot].network_events_,
                     to_be_removed_masks,
                     ACE_Reactor::CLR_MASK);

      // Disassociate/Reassociate the event from/with the I/O handle.
      // This will depend on the value of remaining set of network
      // events that the <event_handler> is interested in.  I don't
      // think we can do anything about errors here, so I will not
      // check this.
      ::WSAEventSelect ((SOCKET) this->current_suspended_info_[slot].io_handle_,
                        this->current_suspended_info_[slot].event_handle_,
                        this->current_suspended_info_[slot].network_events_);
    }
  // Normal event entries.
  else if (ACE_BIT_ENABLED (to_be_removed_masks, ACE_Event_Handler::DONT_CALL))
    // Preserve DONT_CALL
    to_be_removed_masks = ACE_Event_Handler::DONT_CALL;
  else
    // Make sure that the <to_be_removed_masks> is the NULL_MASK
    to_be_removed_masks = ACE_Event_Handler::NULL_MASK;

  // If this event was marked for resumption, undo the resumption flag
  // and reduce the to be resumed count.
  if (this->current_suspended_info_[slot].resume_entry_)
    {
      // Undo resumption
      this->current_suspended_info_[slot].resume_entry_ = false;
      // Decrement the handle count
      --this->handles_to_be_resumed_;
    }

  // If there are no more events that the <Event_Handler> is
  // interested in, or this is a non-I/O entry, schedule the
  // <Event_Handler> for removal
  if (this->current_suspended_info_[slot].network_events_ == 0)
    {
      // Mark to be deleted
      this->current_suspended_info_[slot].delete_entry_ = true;
      // Remember the mask
      this->current_suspended_info_[slot].close_masks_ = to_be_removed_masks;
      // Increment the handle count
      ++this->handles_to_be_deleted_;
    }
  // Since it is not a complete removal, we'll call handle_close for
  // all the masks that were removed.  This does not change the
  // internal state of the reactor.
  //
  // Note: this condition only applies to I/O entries
  else if (ACE_BIT_ENABLED (to_be_removed_masks, ACE_Event_Handler::DONT_CALL) == 0)
    {
      ACE_HANDLE handle = this->current_suspended_info_[slot].io_handle_;
      this->current_suspended_info_[slot].event_handler_->handle_close (handle,
                                                                        to_be_removed_masks);
    }

  return 0;
}

// Same as remove_handler_i but for an entry still waiting in the
// to-be-added set: clears masks, undoes any pending suspension, and
// schedules the slot for deletion when no network events remain.
int
ACE_WFMO_Reactor_Handler_Repository::remove_to_be_added_handler_i (size_t slot,
                                                                   ACE_Reactor_Mask to_be_removed_masks)
{
  // I/O entries
  if (this->to_be_added_info_[slot].io_entry_)
    {
      // See if there are other events that the <Event_Handler> is
      // interested in
      this->bit_ops (this->to_be_added_info_[slot].network_events_,
                     to_be_removed_masks,
                     ACE_Reactor::CLR_MASK);

      // Disassociate/Reassociate the event from/with the I/O handle.
      // This will depend on the value of remaining set of network
      // events that the <event_handler> is interested in.  I don't
      // think we can do anything about errors here, so I will not
      // check this.
      ::WSAEventSelect ((SOCKET) this->to_be_added_info_[slot].io_handle_,
                        this->to_be_added_info_[slot].event_handle_,
                        this->to_be_added_info_[slot].network_events_);
    }
  // Normal event entries.
  else if (ACE_BIT_ENABLED (to_be_removed_masks, ACE_Event_Handler::DONT_CALL))
    // Preserve DONT_CALL
    to_be_removed_masks = ACE_Event_Handler::DONT_CALL;
  else
    // Make sure that the <to_be_removed_masks> is the NULL_MASK
    to_be_removed_masks = ACE_Event_Handler::NULL_MASK;

  // If this event was marked for suspension, undo the suspension flag
  // and reduce the to be suspended count.
  if (this->to_be_added_info_[slot].suspend_entry_)
    {
      // Undo suspension
      this->to_be_added_info_[slot].suspend_entry_ = false;
      // Decrement the handle count
      --this->handles_to_be_suspended_;
    }

  // If there are no more events that the <Event_Handler> is
  // interested in, or this is a non-I/O entry, schedule the
  // <Event_Handler> for removal
  if (this->to_be_added_info_[slot].network_events_ == 0)
    {
      // Mark to be deleted
      this->to_be_added_info_[slot].delete_entry_ = true;
      // Remember the mask
      this->to_be_added_info_[slot].close_masks_ = to_be_removed_masks;
      // Increment the handle count
      ++this->handles_to_be_deleted_;
    }
  // Since it is not a complete removal, we'll call handle_close
  // for all the masks that were removed.  This does not change
  // the internal state of the reactor.
  //
  // Note: this condition only applies to I/O entries
  else if (ACE_BIT_ENABLED (to_be_removed_masks, ACE_Event_Handler::DONT_CALL) == 0)
    {
      ACE_HANDLE handle = this->to_be_added_info_[slot].io_handle_;
      this->to_be_added_info_[slot].event_handler_->handle_close (handle,
                                                                  to_be_removed_masks);
    }

  return 0;
}

// Mark every entry matching <handle> (in the current, suspended, and
// to-be-added sets) for suspension; sets <changes_required> when any
// bookkeeping counter changed.
int
ACE_WFMO_Reactor_Handler_Repository::suspend_handler_i (ACE_HANDLE handle,
                                                        bool &changes_required)
{
  size_t i = 0;

  // Go through all the handles looking for <handle>.  Even if we find
  // it, we continue through the rest of the list since <handle> could
  // appear multiple times. All handles are checked.

  // Check the current entries first.
  // (body of suspend_handler_i, continued from above)
  for (i = 0; i < this->max_handlep1_; ++i)
    // Since the handle can either be the event or the I/O handle,
    // we have to check both
    if ((this->current_handles_[i] == handle
         || this->current_info_[i].io_handle_ == handle) &&
        // Make sure that it is not already marked for suspension
        !this->current_info_[i].suspend_entry_)
      {
        // Mark to be suspended
        this->current_info_[i].suspend_entry_ = true;
        // Increment the handle count
        ++this->handles_to_be_suspended_;
        // Changes will be required
        changes_required = true;
      }

  // Then check the suspended entries.
  for (i = 0; i < this->suspended_handles_; ++i)
    // Since the handle can either be the event or the I/O handle,
    // we have to check both
    if ((this->current_suspended_info_[i].event_handle_ == handle
         || this->current_suspended_info_[i].io_handle_ == handle) &&
        // Make sure that the resumption is not already undone
        this->current_suspended_info_[i].resume_entry_)
      {
        // Undo resumption
        this->current_suspended_info_[i].resume_entry_ = false;
        // Decrement the handle count
        --this->handles_to_be_resumed_;
        // Changes will be required
        changes_required = true;
      }

  // Then check the to_be_added entries.
  for (i = 0; i < this->handles_to_be_added_; ++i)
    // Since the handle can either be the event or the I/O handle,
    // we have to check both
    if ((this->to_be_added_info_[i].io_handle_ == handle
         || this->to_be_added_info_[i].event_handle_ == handle) &&
        // Make sure that it is not already marked for suspension
        !this->to_be_added_info_[i].suspend_entry_)
      {
        // Mark to be suspended
        this->to_be_added_info_[i].suspend_entry_ = true;
        // Increment the handle count
        ++this->handles_to_be_suspended_;
        // Changes will be required
        changes_required = true;
      }

  return 0;
}

// Mark every entry matching <handle> for resumption (or undo a pending
// suspension); sets <changes_required> when any counter changed.
int
ACE_WFMO_Reactor_Handler_Repository::resume_handler_i (ACE_HANDLE handle,
                                                       bool &changes_required)
{
  size_t i = 0;

  // Go through all the handles looking for <handle>.  Even if we find
  // it, we continue through the rest of the list since <handle> could
  // appear multiple times. All handles are checked.

  // Check the current entries first.
  for (i = 0; i < this->max_handlep1_; ++i)
    // Since the handle can either be the event or the I/O handle,
    // we have to check both
    if ((this->current_handles_[i] == handle
         || this->current_info_[i].io_handle_ == handle) &&
        // Make sure that the suspension is not already undone
        this->current_info_[i].suspend_entry_)
      {
        // Undo suspension
        this->current_info_[i].suspend_entry_ = false;
        // Decrement the handle count
        --this->handles_to_be_suspended_;
        // Changes will be required
        changes_required = true;
      }

  // Then check the suspended entries.
  for (i = 0; i < this->suspended_handles_; ++i)
    // Since the handle can either be the event or the I/O handle,
    // we have to check both
    if ((this->current_suspended_info_[i].event_handle_ == handle
         || this->current_suspended_info_[i].io_handle_ == handle) &&
        // Make sure that it is not already marked for resumption
        !this->current_suspended_info_[i].resume_entry_)
      {
        // Mark to be resumed
        this->current_suspended_info_[i].resume_entry_ = true;
        // Increment the handle count
        ++this->handles_to_be_resumed_;
        // Changes will be required
        changes_required = true;
      }

  // Then check the to_be_added entries.
  for (i = 0; i < this->handles_to_be_added_; ++i)
    // Since the handle can either be the event or the I/O handle,
    // we have to check both
    if ((this->to_be_added_info_[i].io_handle_ == handle
         || this->to_be_added_info_[i].event_handle_ == handle) &&
        // Make sure that the suspension is not already undone
        this->to_be_added_info_[i].suspend_entry_)
      {
        // Undo suspension
        this->to_be_added_info_[i].suspend_entry_ = false;
        // Decrement the handle count
        --this->handles_to_be_suspended_;
        // Changes will be required
        changes_required = true;
      }

  return 0;
}

// Deregister every handler in all three sets (current, suspended, and
// to-be-added), then wake all waiting threads so they reconsult the
// handle set.  The lock is held only for the unbind loop.
void
ACE_WFMO_Reactor_Handler_Repository::unbind_all (void)
{
  {
    ACE_GUARD (ACE_Process_Mutex, ace_mon, this->wfmo_reactor_.lock_);

    bool dummy;
    size_t i;

    // Remove all the current handlers
    for (i = 0; i < this->max_handlep1_; ++i)
      this->unbind_i (this->current_handles_[i],
                      ACE_Event_Handler::ALL_EVENTS_MASK,
                      dummy);

    // Remove all the suspended handlers
    for (i = 0; i < this->suspended_handles_; ++i)
      this->unbind_i (this->current_suspended_info_[i].event_handle_,
                      ACE_Event_Handler::ALL_EVENTS_MASK,
                      dummy);

    // Remove all the to_be_added handlers
    for (i = 0; i < this->handles_to_be_added_; ++i)
      this->unbind_i (this->to_be_added_info_[i].event_handle_,
                      ACE_Event_Handler::ALL_EVENTS_MASK,
                      dummy);
  }
  // The guard is released here

  // Wake up all threads in WaitForMultipleObjects so that they can
  // reconsult the handle set
  this->wfmo_reactor_.wakeup_all_threads ();
}

// Queue <event_handler> into the to-be-added set (it is spliced into
// the active set later by make_changes_in_to_be_added_infos).  Takes a
// reference on the handler; returns -1 with errno = EMFILE when the
// table is full.
int
ACE_WFMO_Reactor_Handler_Repository::bind_i (bool io_entry,
                                             ACE_Event_Handler *event_handler,
                                             long network_events,
                                             ACE_HANDLE io_handle,
                                             ACE_HANDLE event_handle,
                                             bool delete_event)
{
  if (event_handler == 0)
    return -1;

  // Make sure that the <handle> is valid
  if (event_handle == ACE_INVALID_HANDLE)
    event_handle = event_handler->get_handle ();
  if (this->invalid_handle (event_handle))
    return -1;

  // Number of slots that will be occupied once all pending changes
  // are applied.
  size_t current_size =
    this->max_handlep1_ +
    this->handles_to_be_added_ -
    this->handles_to_be_deleted_ +
    this->suspended_handles_;
  // (body of bind_i, continued from above)
  // Make sure that there's room in the table and that total pending
  // additions should not exceed what the <to_be_added_info_> array
  // can hold.
  if (current_size < this->max_size_ &&
      this->handles_to_be_added_ < this->max_size_)
    {
      // Cache this set into the <to_be_added_info_>, till we come
      // around to actually adding this to the <current_info_>
      this->to_be_added_info_[this->handles_to_be_added_].set (event_handle,
                                                               io_entry,
                                                               event_handler,
                                                               io_handle,
                                                               network_events,
                                                               delete_event);

      ++this->handles_to_be_added_;

      event_handler->add_reference ();

      // Wake up all threads in WaitForMultipleObjects so that they can
      // reconsult the handle set
      this->wfmo_reactor_.wakeup_all_threads ();
    }
  else
    {
      errno = EMFILE;   // File descriptor table is full (better than nothing)
      return -1;
    }

  return 0;
}

// Apply pending deletions and suspensions to the current (active)
// handle set.  Entries are removed by swapping the last valid slot
// into the vacated position; the handle_close upcall is deliberately
// made only AFTER all internal state is updated, so a handler that
// re-deregisters from inside handle_close sees consistent state.
int
ACE_WFMO_Reactor_Handler_Repository::make_changes_in_current_infos (void)
{
  // Go through the entire valid array and check for all handles that
  // have been schedule for deletion
  if (this->handles_to_be_deleted_ > 0 || this->handles_to_be_suspended_ > 0)
    {
      size_t i = 0;
      while (i < this->max_handlep1_)
        {
          // This stuff is necessary here, since we should not make
          // the upcall until all the internal data structures have
          // been updated.  This is to protect against upcalls that
          // try to deregister again.
          ACE_HANDLE handle = ACE_INVALID_HANDLE;
          ACE_Reactor_Mask masks = ACE_Event_Handler::NULL_MASK;
          ACE_Event_Handler *event_handler = 0;

          // See if this entry is scheduled for deletion
          if (this->current_info_[i].delete_entry_)
            {
              // Calling the <handle_close> method here will ensure that we
              // will only call it once per deregistering <Event_Handler>.
              // This is essential in the case when the <Event_Handler> will
              // do something like delete itself and we have multiple
              // threads in WFMO_Reactor.
              //
              // Make sure that the DONT_CALL mask is not set
              masks = this->current_info_[i].close_masks_;
              if (ACE_BIT_ENABLED (masks, ACE_Event_Handler::DONT_CALL) == 0)
                {
                  // Grab the correct handle depending on the type entry
                  if (this->current_info_[i].io_entry_)
                    handle = this->current_info_[i].io_handle_;
                  else
                    handle = this->current_handles_[i];

                  // Event handler
                  event_handler = this->current_info_[i].event_handler_;
                }

              // If <WFMO_Reactor> created the event, we need to clean it up
              if (this->current_info_[i].delete_event_)
                ACE_OS::event_destroy (&this->current_handles_[i]);

              // Reduce count by one
              --this->handles_to_be_deleted_;
            }

          // See if this entry is scheduled for suspension
          else if (this->current_info_[i].suspend_entry_)
            {
              this->current_suspended_info_[this->suspended_handles_].set (this->current_handles_[i],
                                                                           this->current_info_[i]);
              // Increase number of suspended handles
              ++this->suspended_handles_;

              // Reduce count by one
              --this->handles_to_be_suspended_;
            }

          // See if this entry is scheduled for deletion or suspension
          // If so we need to clean up
          if (this->current_info_[i].delete_entry_ ||
              this->current_info_[i].suspend_entry_)
            {
              size_t last_valid_slot = this->max_handlep1_ - 1;
              // If this is the last handle in the set, no need to swap
              // places.  Simply remove it.
              if (i < last_valid_slot)
                // Swap this handle with the last valid handle
                {
                  // Struct copy
                  this->current_info_[i] =
                    this->current_info_[last_valid_slot];
                  this->current_handles_[i] =
                    this->current_handles_[last_valid_slot];
                }
              // Reset the info in this slot
              this->current_info_[last_valid_slot].reset ();
              this->current_handles_[last_valid_slot] = ACE_INVALID_HANDLE;
              --this->max_handlep1_;
            }
          else
            {
              // This current entry is not up for deletion or
              // suspension.  Proceed to the next entry in the current
              // handles.
              ++i;
            }

          // Now that all internal structures have been updated, make
          // the upcall.
          if (event_handler != 0)
            {
              bool const requires_reference_counting =
                event_handler->reference_counting_policy ().value () ==
                ACE_Event_Handler::Reference_Counting_Policy::ENABLED;

              event_handler->handle_close (handle, masks);

              if (requires_reference_counting)
                {
                  event_handler->remove_reference ();
                }
            }
        }
    }

  return 0;
}

// Apply pending deletions and resumptions to the suspended handle
// set; mirrors make_changes_in_current_infos.
int
ACE_WFMO_Reactor_Handler_Repository::make_changes_in_suspension_infos (void)
{
  // Go through the <suspended_handle> array
  if (this->handles_to_be_deleted_ > 0 || this->handles_to_be_resumed_ > 0)
    {
      size_t i = 0;
      while (i < this->suspended_handles_)
        {
          // This stuff is necessary here, since we should not make
          // the upcall until all the internal data structures have
          // been updated.  This is to protect against upcalls that
          // try to deregister again.
          ACE_HANDLE handle = ACE_INVALID_HANDLE;
          ACE_Reactor_Mask masks = ACE_Event_Handler::NULL_MASK;
          ACE_Event_Handler *event_handler = 0;

          // See if this entry is scheduled for deletion
          if (this->current_suspended_info_[i].delete_entry_)
            {
              // Calling the <handle_close> method here will ensure that we
              // will only call it once per deregistering <Event_Handler>.
              // This is essential in the case when the <Event_Handler> will
              // do something like delete itself and we have multiple
              // threads in WFMO_Reactor.
// // Make sure that the DONT_CALL mask is not set masks = this->current_suspended_info_[i].close_masks_; if (ACE_BIT_ENABLED (masks, ACE_Event_Handler::DONT_CALL) == 0) { // Grab the correct handle depending on the type entry if (this->current_suspended_info_[i].io_entry_) handle = this->current_suspended_info_[i].io_handle_; else handle = this->current_suspended_info_[i].event_handle_; // Upcall event_handler = this->current_suspended_info_[i].event_handler_; } // If <WFMO_Reactor> created the event, we need to clean it up if (this->current_suspended_info_[i].delete_event_) ACE_OS::event_destroy (&this->current_suspended_info_[i].event_handle_); // Reduce count by one --this->handles_to_be_deleted_; } else if (this->current_suspended_info_[i].resume_entry_) { // Add to the end of the current handles set this->current_handles_[this->max_handlep1_] = this->current_suspended_info_[i].event_handle_; // Struct copy this->current_info_[this->max_handlep1_].set (this->current_suspended_info_[i]); ++this->max_handlep1_; // Reduce count by one --this->handles_to_be_resumed_; } // If an entry needs to be removed, either because it // was deleted or resumed, remove it now before doing // the upcall. if (this->current_suspended_info_[i].resume_entry_ || this->current_suspended_info_[i].delete_entry_) { size_t last_valid_slot = this->suspended_handles_ - 1; // Net effect is that we're removing an entry and // compressing the list from the end. So, if removing // an entry from the middle, copy the last valid one to the // removed slot. Reset the end and decrement the number // of suspended handles. if (i < last_valid_slot) // Struct copy this->current_suspended_info_[i] = this->current_suspended_info_[last_valid_slot]; this->current_suspended_info_[last_valid_slot].reset (); --this->suspended_handles_; } else { // This current entry is not up for deletion or // resumption. Proceed to the next entry in the // suspended handles. 
++i; } // Now that all internal structures have been updated, make // the upcall. if (event_handler != 0) { int requires_reference_counting = event_handler->reference_counting_policy ().value () == ACE_Event_Handler::Reference_Counting_Policy::ENABLED; event_handler->handle_close (handle, masks); if (requires_reference_counting) { event_handler->remove_reference (); } } } } return 0; } int ACE_WFMO_Reactor_Handler_Repository::make_changes_in_to_be_added_infos (void) { // Go through the <to_be_added_*> arrays for (size_t i = 0; i < this->handles_to_be_added_; ++i) { // This stuff is necessary here, since we should not make // the upcall until all the internal data structures have // been updated. This is to protect against upcalls that // try to deregister again. ACE_HANDLE handle = ACE_INVALID_HANDLE; ACE_Reactor_Mask masks = ACE_Event_Handler::NULL_MASK; ACE_Event_Handler *event_handler = 0; // See if this entry is scheduled for deletion if (this->to_be_added_info_[i].delete_entry_) { // Calling the <handle_close> method here will ensure that we // will only call it once per deregistering <Event_Handler>. // This is essential in the case when the <Event_Handler> will // do something like delete itself and we have multiple // threads in WFMO_Reactor. 
// // Make sure that the DONT_CALL mask is not set masks = this->to_be_added_info_[i].close_masks_; if (ACE_BIT_ENABLED (masks, ACE_Event_Handler::DONT_CALL) == 0) { // Grab the correct handle depending on the type entry if (this->to_be_added_info_[i].io_entry_) handle = this->to_be_added_info_[i].io_handle_; else handle = this->to_be_added_info_[i].event_handle_; // Upcall event_handler = this->to_be_added_info_[i].event_handler_; } // If <WFMO_Reactor> created the event, we need to clean it up if (this->to_be_added_info_[i].delete_event_) ACE_OS::event_destroy (&this->to_be_added_info_[i].event_handle_); // Reduce count by one --this->handles_to_be_deleted_; } // See if this entry is scheduled for suspension else if (this->to_be_added_info_[i].suspend_entry_) { this->current_suspended_info_ [this->suspended_handles_].set (this->to_be_added_info_[i].event_handle_, this->to_be_added_info_[i]); // Increase number of suspended handles ++this->suspended_handles_; // Reduce count by one --this->handles_to_be_suspended_; } // If neither of the two flags are on, add to current else { // Add to the end of the current handles set this->current_handles_[this->max_handlep1_] = this->to_be_added_info_[i].event_handle_; // Struct copy this->current_info_[this->max_handlep1_].set (this->to_be_added_info_[i]); ++this->max_handlep1_; } // Reset the <to_be_added_info_> this->to_be_added_info_[i].reset (); // Now that all internal structures have been updated, make the // upcall. 
if (event_handler != 0) { int requires_reference_counting = event_handler->reference_counting_policy ().value () == ACE_Event_Handler::Reference_Counting_Policy::ENABLED; event_handler->handle_close (handle, masks); if (requires_reference_counting) { event_handler->remove_reference (); } } } // Since all to be added handles have been taken care of, reset the // counter this->handles_to_be_added_ = 0; return 0; } void ACE_WFMO_Reactor_Handler_Repository::dump (void) const { #if defined (ACE_HAS_DUMP) size_t i = 0; ACE_TRACE ("ACE_WFMO_Reactor_Handler_Repository::dump"); ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this)); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Max size = %d\n"), this->max_size_)); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Current info table\n\n"))); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\tSize = %d\n"), this->max_handlep1_)); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\tHandles to be suspended = %d\n"), this->handles_to_be_suspended_)); for (i = 0; i < this->max_handlep1_; ++i) this->current_info_[i].dump (this->current_handles_[i]); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\n"))); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("To-be-added info table\n\n"))); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\tSize = %d\n"), this->handles_to_be_added_)); for (i = 0; i < this->handles_to_be_added_; ++i) this->to_be_added_info_[i].dump (); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\n"))); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Suspended info table\n\n"))); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\tSize = %d\n"), this->suspended_handles_)); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\tHandles to be resumed = %d\n"), this->handles_to_be_resumed_)); for (i = 0; i < this->suspended_handles_; ++i) this->current_suspended_info_[i].dump (); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("\n"))); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Total handles to be deleted = %d\n"), this->handles_to_be_deleted_)); ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP)); #endif /* ACE_HAS_DUMP */ } /************************************************************/ int ACE_WFMO_Reactor::work_pending (const ACE_Time_Value 
&)
{
  // work_pending is not supported by the WFMO reactor.
  ACE_NOTSUP_RETURN (-1);
}

#if defined (ACE_WIN32_VC8)
# pragma warning (push)
# pragma warning (disable:4355) /* Use of 'this' in initializer list */
# endif

// Default-size constructor; delegates all real work to open().
ACE_WFMO_Reactor::ACE_WFMO_Reactor (ACE_Sig_Handler *sh,
                                    ACE_Timer_Queue *tq,
                                    ACE_Reactor_Notify *notify)
  : signal_handler_ (0),
    delete_signal_handler_ (false),
    timer_queue_ (0),
    delete_timer_queue_ (false),
    delete_handler_rep_ (false),
    notify_handler_ (0),
    delete_notify_handler_ (false),
    lock_adapter_ (lock_),
    handler_rep_ (*this),
    // this event is initially signaled
    ok_to_wait_ (1),
    // this event is initially unsignaled
    wakeup_all_threads_ (0),
    // this event is initially unsignaled
    waiting_to_change_state_ (0),
    active_threads_ (0),
    owner_ (ACE_Thread::self ()),
    new_owner_ (0),
    change_state_thread_ (0),
    open_for_business_ (false),
    deactivated_ (0)
{
  if (this->open (ACE_WFMO_Reactor::DEFAULT_SIZE, 0, sh, tq, 0, notify) == -1)
    ACE_ERROR ((LM_ERROR,
                ACE_TEXT ("%p\n"),
                ACE_TEXT ("WFMO_Reactor")));
}

// Sized constructor; <unused> exists only for signature compatibility
// with other reactor implementations.
ACE_WFMO_Reactor::ACE_WFMO_Reactor (size_t size,
                                    int unused,
                                    ACE_Sig_Handler *sh,
                                    ACE_Timer_Queue *tq,
                                    ACE_Reactor_Notify *notify)
  : signal_handler_ (0),
    delete_signal_handler_ (false),
    timer_queue_ (0),
    delete_timer_queue_ (false),
    delete_handler_rep_ (false),
    notify_handler_ (0),
    delete_notify_handler_ (false),
    lock_adapter_ (lock_),
    handler_rep_ (*this),
    // this event is initially signaled
    ok_to_wait_ (1),
    // this event is initially unsignaled
    wakeup_all_threads_ (0),
    // this event is initially unsignaled
    waiting_to_change_state_ (0),
    active_threads_ (0),
    owner_ (ACE_Thread::self ()),
    new_owner_ (0),
    change_state_thread_ (0),
    open_for_business_ (false),
    deactivated_ (0)
{
  ACE_UNUSED_ARG (unused);

  if (this->open (size, 0, sh, tq, 0, notify) == -1)
    ACE_ERROR ((LM_ERROR,
                ACE_TEXT ("%p\n"),
                ACE_TEXT ("WFMO_Reactor")));
}

#if defined (ACE_WIN32_VC8)
# pragma warning (pop)
#endif

// Not supported by this reactor implementation.
int
ACE_WFMO_Reactor::current_info (ACE_HANDLE, size_t &)
{
  return -1;
}

// Initialize the reactor: set up the timer queue, signal handler,
// handle repository, and notification handler, then register the
// internal wakeup handler.  Safe to call on a previously-opened
// reactor only after close(); returns -1 if already open.
int
ACE_WFMO_Reactor::open (size_t size,
                        bool,
                        ACE_Sig_Handler *sh,
                        ACE_Timer_Queue *tq,
                        int,
                        ACE_Reactor_Notify *notify)
{
  // This GUARD is necessary since we are updating shared state.
  ACE_GUARD_RETURN (ACE_Process_Mutex, ace_mon, this->lock_, -1);

  // If we are already open, return -1
  if (this->open_for_business_)
    return -1;

  // Timer Queue
  if (this->delete_timer_queue_)
    delete this->timer_queue_;
  else if (this->timer_queue_)
    this->timer_queue_->close ();

  if (tq == 0)
    {
      ACE_NEW_RETURN (this->timer_queue_,
                      ACE_Timer_Heap,
                      -1);
      this->delete_timer_queue_ = true;
    }
  else
    {
      this->timer_queue_ = tq;
      this->delete_timer_queue_ = false;
    }

  // Signal Handler
  if (this->delete_signal_handler_)
    delete this->signal_handler_;
  if (sh == 0)
    {
      ACE_NEW_RETURN (this->signal_handler_,
                      ACE_Sig_Handler,
                      -1);
      this->delete_signal_handler_ = true;
    }
  else
    {
      this->signal_handler_ = sh;
      this->delete_signal_handler_ = false;
    }

  // Setup the atomic wait array (used later in <handle_events>)
  this->atomic_wait_array_[0] = this->lock_.lock ().proc_mutex_;
  this->atomic_wait_array_[1] = this->ok_to_wait_.handle ();

  // Prevent memory leaks when the ACE_WFMO_Reactor is reopened.
  if (this->delete_handler_rep_)
    {
      if (this->handler_rep_.changes_required ())
        {
          // Make necessary changes to the handler repository
          this->handler_rep_.make_changes ();
          // Turn off <wakeup_all_threads_> since all necessary changes
          // have completed
          this->wakeup_all_threads_.reset ();
        }

      // Explicit destructor call before re-opening in place.
      this->handler_rep_.~ACE_WFMO_Reactor_Handler_Repository ();
    }

  // Open the handle repository.  Two additional handles for internal
  // purposes
  if (this->handler_rep_.open (size + 2) == -1)
    ACE_ERROR_RETURN ((LM_ERROR,
                       ACE_TEXT ("%p\n"),
                       ACE_TEXT ("opening handler repository")),
                      -1);
  else
    this->delete_handler_rep_ = true;

  if (this->notify_handler_ != 0 && this->delete_notify_handler_)
    delete this->notify_handler_;

  this->notify_handler_ = notify;

  if (this->notify_handler_ == 0)
    {
      ACE_NEW_RETURN (this->notify_handler_,
                      ACE_WFMO_Reactor_Notify,
                      -1);

      if (this->notify_handler_ == 0)
        return -1;
      else
        this->delete_notify_handler_ = true;
    }

  /* NOTE */
  // The order of the following two registrations is very important

  // Open the notification handler
  if (this->notify_handler_->open (this, this->timer_queue_) == -1)
    ACE_ERROR_RETURN ((LM_ERROR,
                       ACE_TEXT ("%p\n"),
                       ACE_TEXT ("opening notify handler ")),
                      -1);

  // Register for <wakeup_all_threads> event
  if (this->register_handler (&this->wakeup_all_threads_handler_,
                              this->wakeup_all_threads_.handle ()) == -1)
    ACE_ERROR_RETURN ((LM_ERROR,
                       ACE_TEXT ("%p\n"),
                       ACE_TEXT ("registering thread wakeup handler")),
                      -1);

  // Since we have added two handles into the handler repository,
  // update the <handler_repository_>
  if (this->handler_rep_.changes_required ())
    {
      // Make necessary changes to the handler repository
      this->handler_rep_.make_changes ();
      // Turn off <wakeup_all_threads_> since all necessary changes
      // have completed
      this->wakeup_all_threads_.reset ();
    }

  // We are open for business
  this->open_for_business_ = true;

  return 0;
}

// Replace the signal handler; the reactor does not assume ownership
// of the caller-supplied handler.
int
ACE_WFMO_Reactor::set_sig_handler (ACE_Sig_Handler *signal_handler)
{
  if (this->signal_handler_ != 0 && this->delete_signal_handler_)
    delete this->signal_handler_;
  this->signal_handler_ = signal_handler;
  this->delete_signal_handler_ = false;
  return 0;
}

// Accessor for the timer queue.
ACE_Timer_Queue *
ACE_WFMO_Reactor::timer_queue (void) const
{
  return this->timer_queue_;
}

// Replace the timer queue; the reactor does not assume ownership of
// the caller-supplied queue.
int
ACE_WFMO_Reactor::timer_queue (ACE_Timer_Queue *tq)
{
  if (this->delete_timer_queue_)
    {
      delete this->timer_queue_;
    }
  else if (this->timer_queue_)
    {
this->timer_queue_->close (); } this->timer_queue_ = tq; this->delete_timer_queue_ = false; return 0; } int ACE_WFMO_Reactor::close (void) { // This GUARD is necessary since we are updating shared state. ACE_GUARD_RETURN (ACE_Process_Mutex, ace_mon, this->lock_, -1); // If we are already closed, return error if (!this->open_for_business_) return -1; // We are now closed this->open_for_business_ = false; // This will unregister all handles this->handler_rep_.close (); return 0; } ACE_WFMO_Reactor::~ACE_WFMO_Reactor (void) { // Assumption: No threads are left in the Reactor when this method // is called (i.e., active_threads_ == 0) // Close down this->close (); // Make necessary changes to the handler repository that we caused // by <close>. this->handler_rep_.make_changes (); if (this->delete_timer_queue_) { delete this->timer_queue_; this->timer_queue_ = 0; this->delete_timer_queue_ = false; } else if (this->timer_queue_) { this->timer_queue_->close (); this->timer_queue_ = 0; } if (this->delete_signal_handler_) { delete this->signal_handler_; this->signal_handler_ = 0; this->delete_signal_handler_ = false; } if (this->delete_notify_handler_) { delete this->notify_handler_; this->notify_handler_ = 0; this->delete_notify_handler_ = false; } } int ACE_WFMO_Reactor::register_handler_i (ACE_HANDLE event_handle, ACE_HANDLE io_handle, ACE_Event_Handler *event_handler, ACE_Reactor_Mask new_masks) { // If this is a Winsock 1 system, the underlying event assignment will // not work, so don't try. Winsock 1 must use ACE_Select_Reactor for // reacting to socket activity. 
#if !defined (ACE_HAS_WINSOCK2) || (ACE_HAS_WINSOCK2 == 0) ACE_UNUSED_ARG (event_handle); ACE_UNUSED_ARG (io_handle); ACE_UNUSED_ARG (event_handler); ACE_UNUSED_ARG (new_masks); ACE_NOTSUP_RETURN (-1); #else // Make sure that the <handle> is valid if (io_handle == ACE_INVALID_HANDLE) io_handle = event_handler->get_handle (); if (this->handler_rep_.invalid_handle (io_handle)) { errno = ERROR_INVALID_HANDLE; return -1; } long new_network_events = 0; bool delete_event = false; auto_ptr <ACE_Auto_Event> event; // Look up the repository to see if the <event_handler> is already // there. ACE_Reactor_Mask old_masks; int found = this->handler_rep_.modify_network_events_i (io_handle, new_masks, old_masks, new_network_events, event_handle, delete_event, ACE_Reactor::ADD_MASK); // Check to see if the user passed us a valid event; If not then we // need to create one if (event_handle == ACE_INVALID_HANDLE) { // Note: don't change this since some C++ compilers have // <auto_ptr>s that don't work properly... auto_ptr<ACE_Auto_Event> tmp (new ACE_Auto_Event); event = tmp; event_handle = event->handle (); delete_event = true; } int result = ::WSAEventSelect ((SOCKET) io_handle, event_handle, new_network_events); // If we had found the <Event_Handler> there is nothing more to do if (found) return result; else if (result != SOCKET_ERROR && this->handler_rep_.bind_i (1, event_handler, new_network_events, io_handle, event_handle, delete_event) != -1) { // The <event_handler> was not found in the repository, add to // the repository. if (delete_event) { // Clear out the handle in the ACE_Auto_Event so that when // it is destroyed, the handle isn't closed out from under // the reactor. After setting it, running down the event // (via auto_ptr<> event, above) at function return will // cause an error because it'll try to close an invalid handle. 
          // To avoid that smashing the errno value, save the errno
          // here, explicitly remove the event so the dtor won't do it
          // again, then restore errno.
          ACE_Errno_Guard guard (errno);
          event->handle (ACE_INVALID_HANDLE);
          event->remove ();
        }
      return 0;
    }
  else
    return -1;
#endif /* ACE_HAS_WINSOCK2 || ACE_HAS_WINSOCK2 == 0 */
}

// Add/clear/set the event mask bits in <new_masks> for the handler
// registered on <io_handle>, per <operation>.  Returns the previous
// masks on success, -1 if the handle is invalid or unregistered.
int
ACE_WFMO_Reactor::mask_ops_i (ACE_HANDLE io_handle,
                              ACE_Reactor_Mask new_masks,
                              int operation)
{
  // Make sure that the <handle> is valid
  if (this->handler_rep_.invalid_handle (io_handle))
    return -1;

  long new_network_events = 0;
  bool delete_event = false;
  ACE_HANDLE event_handle = ACE_INVALID_HANDLE;

  // Look up the repository to see if the <Event_Handler> is already
  // there.
  ACE_Reactor_Mask old_masks;
  int found =
    this->handler_rep_.modify_network_events_i (io_handle,
                                                new_masks,
                                                old_masks,
                                                new_network_events,
                                                event_handle,
                                                delete_event,
                                                operation);
  if (found)
    {
      // Push the recomputed event set down to Winsock.
      int result = ::WSAEventSelect ((SOCKET) io_handle,
                                     event_handle,
                                     new_network_events);
      if (result == 0)
        return old_masks;
      else
        return result;
    }
  else
    return -1;
}

// Locate the registration for <io_handle> in the current, suspended,
// or to-be-added tables (skipping entries scheduled for deletion) and
// apply <operation> with <new_masks> to its network-event bits via
// bit_ops().  Out-params report the previous masks, the recomputed
// event set, the associated event handle, and event ownership.
// Returns 1 if the handle was found, 0 otherwise (in which case the
// bit ops were applied to the caller's <new_network_events> scratch).
int
ACE_WFMO_Reactor_Handler_Repository::modify_network_events_i (ACE_HANDLE io_handle,
                                                              ACE_Reactor_Mask new_masks,
                                                              ACE_Reactor_Mask &old_masks,
                                                              long &new_network_events,
                                                              ACE_HANDLE &event_handle,
                                                              bool &delete_event,
                                                              int operation)
{
  long *modified_network_events = &new_network_events;
  int found = 0;
  size_t i;

  // First go through the current entries
  //
  // Look for all entries in the current handles for matching handle
  // (except those that have been scheduled for deletion)
  for (i = 0; i < this->max_handlep1_ && !found; ++i)
    if (io_handle == this->current_info_[i].io_handle_ &&
        !this->current_info_[i].delete_entry_)
      {
        found = 1;
        modified_network_events = &this->current_info_[i].network_events_;
        delete_event = this->current_info_[i].delete_event_;
        event_handle = this->current_handles_[i];
      }

  // Then pass through the suspended handles
  //
  // Look for all entries in the suspended handles for matching handle
  // (except those that have been scheduled for deletion)
  for (i = 0; i < this->suspended_handles_ && !found; ++i)
    if (io_handle == this->current_suspended_info_[i].io_handle_ &&
        !this->current_suspended_info_[i].delete_entry_)
      {
        found = 1;
        modified_network_events = &this->current_suspended_info_[i].network_events_;
        delete_event = this->current_suspended_info_[i].delete_event_;
        event_handle = this->current_suspended_info_[i].event_handle_;
      }

  // Then check the to_be_added handles
  //
  // Look for all entries in the to_be_added handles for matching
  // handle (except those that have been scheduled for deletion)
  for (i = 0; i < this->handles_to_be_added_ && !found; ++i)
    if (io_handle == this->to_be_added_info_[i].io_handle_ &&
        !this->to_be_added_info_[i].delete_entry_)
      {
        found = 1;
        modified_network_events = &this->to_be_added_info_[i].network_events_;
        delete_event = this->to_be_added_info_[i].delete_event_;
        event_handle = this->to_be_added_info_[i].event_handle_;
      }

  // Apply the requested operation to whichever event set we found
  // (or to the caller's scratch variable when not found).
  old_masks = this->bit_ops (*modified_network_events,
                             new_masks,
                             operation);

  new_network_events = *modified_network_events;

  return found;
}

// Convenience wrapper: look up the handler for <handle>, discarding
// the existing-mask out-parameter.
ACE_Event_Handler *
ACE_WFMO_Reactor_Handler_Repository::find_handler (ACE_HANDLE handle)
{
  long existing_masks_ignored = 0;
  return this->handler (handle,
                        existing_masks_ignored);
}

// Find the event handler registered under <handle> (matched against
// either the I/O handle or the event handle) in the current,
// suspended, or to-be-added tables, skipping entries scheduled for
// deletion.  On success the handler's reference count is bumped and
// <existing_masks> receives its registered network events; returns 0
// when not found.
ACE_Event_Handler *
ACE_WFMO_Reactor_Handler_Repository::handler (ACE_HANDLE handle,
                                              long &existing_masks)
{
  int found = 0;
  size_t i = 0;
  ACE_Event_Handler *event_handler = 0;
  existing_masks = 0;

  // Look for the handle first

  // First go through the current entries
  //
  // Look for all entries in the current handles for matching handle
  // (except those that have been scheduled for deletion)
  for (i = 0; i < this->max_handlep1_ && !found; ++i)
    if ((handle == this->current_info_[i].io_handle_ ||
         handle == this->current_handles_[i]) &&
        !this->current_info_[i].delete_entry_)
      {
        found = 1;
        event_handler = this->current_info_[i].event_handler_;
        existing_masks = this->current_info_[i].network_events_;
      }

  // Then pass through the suspended handles
  //
  // Look for all entries in the suspended handles for matching handle
  // (except those that have been scheduled for deletion)
  for (i = 0; i < this->suspended_handles_ && !found; ++i)
    if ((handle == this->current_suspended_info_[i].io_handle_ ||
         handle == this->current_suspended_info_[i].event_handle_) &&
        !this->current_suspended_info_[i].delete_entry_)
      {
        found = 1;
        event_handler = this->current_suspended_info_[i].event_handler_;
        existing_masks = this->current_suspended_info_[i].network_events_;
      }

  // Then check the to_be_added handles
  //
  // Look for all entries in the to_be_added handles for matching
  // handle (except those that have been scheduled for deletion)
  for (i = 0; i < this->handles_to_be_added_ && !found; ++i)
    if ((handle == this->to_be_added_info_[i].io_handle_ ||
         handle == this->to_be_added_info_[i].event_handle_) &&
        !this->to_be_added_info_[i].delete_entry_)
      {
        found = 1;
        event_handler = this->to_be_added_info_[i].event_handler_;
        existing_masks = this->to_be_added_info_[i].network_events_;
      }

  // Hand the caller a counted reference.
  if (event_handler)
    event_handler->add_reference ();

  return event_handler;
}

// Check whether the handler registered under <handle> covers all of
// <user_masks>.  Returns 0 and (optionally) a counted reference via
// <user_event_handler> when it does; -1 otherwise.
int
ACE_WFMO_Reactor_Handler_Repository::handler (ACE_HANDLE handle,
                                              ACE_Reactor_Mask user_masks,
                                              ACE_Event_Handler **user_event_handler)
{
  long existing_masks = 0;
  int found = 0;

  ACE_Event_Handler_var safe_event_handler =
    this->handler (handle,
                   existing_masks);

  if (safe_event_handler.handler ())
    found = 1;

  if (!found)
    return -1;

  // Otherwise, make sure that the masks that the user is looking for
  // are on.
  // Map each requested reactor mask to the Winsock FD_* bits that
  // satisfy it; any unsatisfied mask clears <found>.
  if (found &&
      ACE_BIT_ENABLED (user_masks, ACE_Event_Handler::READ_MASK))
    if (!ACE_BIT_ENABLED (existing_masks, FD_READ) &&
        !ACE_BIT_ENABLED (existing_masks, FD_CLOSE))
      found = 0;

  if (found &&
      ACE_BIT_ENABLED (user_masks, ACE_Event_Handler::WRITE_MASK))
    if (!ACE_BIT_ENABLED (existing_masks, FD_WRITE))
      found = 0;

  if (found &&
      ACE_BIT_ENABLED (user_masks, ACE_Event_Handler::EXCEPT_MASK))
    if (!ACE_BIT_ENABLED (existing_masks, FD_OOB))
      found = 0;

  if (found &&
      ACE_BIT_ENABLED (user_masks, ACE_Event_Handler::ACCEPT_MASK))
    if (!ACE_BIT_ENABLED (existing_masks, FD_ACCEPT))
      found = 0;

  if (found &&
      ACE_BIT_ENABLED (user_masks, ACE_Event_Handler::CONNECT_MASK))
    if (!ACE_BIT_ENABLED (existing_masks, FD_CONNECT))
      found = 0;

  if (found &&
      ACE_BIT_ENABLED (user_masks, ACE_Event_Handler::QOS_MASK))
    if (!ACE_BIT_ENABLED (existing_masks, FD_QOS))
      found = 0;

  if (found &&
      ACE_BIT_ENABLED (user_masks, ACE_Event_Handler::GROUP_QOS_MASK))
    if (!ACE_BIT_ENABLED (existing_masks, FD_GROUP_QOS))
      found = 0;

  // Transfer the counted reference to the caller on success.
  if (found && user_event_handler)
    *user_event_handler = safe_event_handler.release ();

  if (found)
    return 0;
  else
    return -1;
}

// Waits for and dispatches all events. Returns -1 on error, 0 if
// max_wait_time expired, or the number of events that were dispatched.
int
ACE_WFMO_Reactor::event_handling (ACE_Time_Value *max_wait_time,
                                  int alertable)
{
  ACE_TRACE ("ACE_WFMO_Reactor::event_handling");

  // Make sure we are not closed
  if (!this->open_for_business_ || this->deactivated_)
    {
      errno = ESHUTDOWN;
      return -1;
    }

  // Stash the current time -- the destructor of this object will
  // automatically compute how much time elapsed since this method was
  // called.
  ACE_Countdown_Time countdown (max_wait_time);

  int result;
  do
    {
      // Check to see if it is ok to enter ::WaitForMultipleObjects
      // This will acquire <this->lock_> on success On failure, the
      // lock will not be acquired
      result = this->ok_to_wait (max_wait_time, alertable);
      if (result != 1)
        return result;

      // Increment the number of active threads
      ++this->active_threads_;

      // Release the <lock_>
      this->lock_.release ();

      // Update the countdown to reflect time waiting to play with the
      // mut and event.
      countdown.update ();

      // Calculate timeout
      int timeout = this->calculate_timeout (max_wait_time);

      // Wait for event to happen
      DWORD wait_status = this->wait_for_multiple_events (timeout,
                                                          alertable);

      // Upcall
      result = this->safe_dispatch (wait_status);
      if (0 == result)
        {
          // wait_for_multiple_events timed out without dispatching
          // anything.  Because of rounding and conversion errors and
          // such, it could be that the wait loop timed out, but
          // the timer queue said it wasn't quite ready to expire a
          // timer. In this case, max_wait_time won't have quite been
          // reduced to 0, and we need to go around again. If max_wait_time
          // is all the way to 0, just return, as the entire time the
          // caller wanted to wait has been used up.
          countdown.update ();     // Reflect time waiting for events
          // NOTE(review): only the microseconds component is tested
          // here; a remaining whole-seconds balance also stops the
          // loop.  Confirm against the comment above before changing.
          if (0 == max_wait_time || max_wait_time->usec () == 0)
            break;
        }
    }
  while (result == 0);

  return result;
}

// Gate a thread's entry into the WFMO wait: succeeds (returning 1,
// with <lock_> held) only when both <lock_> and the <ok_to_wait_>
// event are acquired; returns 0 on timeout, -1 on error.
int
ACE_WFMO_Reactor::ok_to_wait (ACE_Time_Value *max_wait_time,
                              int alertable)
{
  // Calculate the max time we should spend here
  //
  // Note: There is really no need to involve the <timer_queue_> here
  // because even if a timeout in the <timer_queue_> does expire we
  // will not be able to dispatch it

  // We need to wait for both the <lock_> and <ok_to_wait_> event.
  // If not on WinCE, use WaitForMultipleObjects() to wait for both atomically.
// On WinCE, the waitAll arg to WFMO must be false, so wait for the // ok_to_wait_ event first (since that's likely to take the longest) then // grab the lock and recheck the ok_to_wait_ event. When we can get them // both, or there's an error/timeout, return. #if defined (ACE_HAS_WINCE) ACE_UNUSED_ARG (alertable); ACE_Time_Value timeout; if (max_wait_time != 0) { timeout = ACE_OS::gettimeofday (); timeout += *max_wait_time; } while (1) { int status; if (max_wait_time == 0) status = this->ok_to_wait_.wait (); else status = this->ok_to_wait_.wait (&timeout); if (status == -1) return -1; // The event is signaled, so it's ok to wait; grab the lock and // recheck the event. If something has changed, restart the wait. if (max_wait_time == 0) status = this->lock_.acquire (); else { status = this->lock_.acquire (timeout); } if (status == -1) return -1; // Have the lock_, now re-check the event. If it's not signaled, // another thread changed something so go back and wait again. if (this->ok_to_wait_.wait (&ACE_Time_Value::zero, 0) == 0) break; this->lock_.release (); } return 1; #else int timeout = max_wait_time == 0 ? INFINITE : max_wait_time->msec (); DWORD result = 0; while (1) { # if defined (ACE_HAS_PHARLAP) // PharLap doesn't implement WaitForMultipleObjectsEx, and doesn't // do async I/O, so it's not needed in this case anyway. 
      // Atomically wait for BOTH members of <atomic_wait_array_>
      // (waitAll == TRUE); loop while only an APC woke us.
      result = ::WaitForMultipleObjects (sizeof this->atomic_wait_array_ / sizeof (ACE_HANDLE),
                                         this->atomic_wait_array_,
                                         TRUE,
                                         timeout);
      if (result != WAIT_IO_COMPLETION)
        break;
# else
      result = ::WaitForMultipleObjectsEx (sizeof this->atomic_wait_array_ / sizeof (ACE_HANDLE),
                                           this->atomic_wait_array_,
                                           TRUE,
                                           timeout,
                                           alertable);
      if (result != WAIT_IO_COMPLETION)
        break;
# endif /* ACE_HAS_PHARLAP */
    }

  switch (result)
    {
    case WAIT_TIMEOUT:
      errno = ETIME;
      return 0;
    case WAIT_FAILED:
    case WAIT_ABANDONED_0:
      ACE_OS::set_errno_to_last_error ();
      return -1;
    default:
      break;
    }

  // It is ok to enter ::WaitForMultipleObjects
  return 1;
#endif /* ACE_HAS_WINCE */
}

DWORD
ACE_WFMO_Reactor::wait_for_multiple_events (int timeout,
                                            int alertable)
{
  // Wait for any of handles_ to be active, or until timeout expires.
  // If <alertable> is enabled allow asynchronous completion of
  // ReadFile and WriteFile operations.
#if defined (ACE_HAS_PHARLAP) || defined (ACE_HAS_WINCE)
  // PharLap doesn't do async I/O and doesn't implement
  // WaitForMultipleObjectsEx, so use WaitForMultipleObjects.
  ACE_UNUSED_ARG (alertable);
  return ::WaitForMultipleObjects (this->handler_rep_.max_handlep1 (),
                                   this->handler_rep_.handles (),
                                   FALSE,
                                   timeout);
#else
  return ::WaitForMultipleObjectsEx (this->handler_rep_.max_handlep1 (),
                                     this->handler_rep_.handles (),
                                     FALSE,
                                     timeout,
                                     alertable);
#endif /* ACE_HAS_PHARLAP */
}

// Non-blocking poll (timeout 0) of the handles from <slot> onward,
// used to pick up additional signaled handles after a dispatch.
DWORD
ACE_WFMO_Reactor::poll_remaining_handles (DWORD slot)
{
  return ::WaitForMultipleObjects (this->handler_rep_.max_handlep1 () - slot,
                                   this->handler_rep_.handles () + slot,
                                   FALSE,
                                   0);
}

// Compute the WFMO timeout in milliseconds.  Only the owner thread
// consults the timer queue; other threads use the caller's wait time
// directly.  A null time means wait forever (INFINITE).
int
ACE_WFMO_Reactor::calculate_timeout (ACE_Time_Value *max_wait_time)
{
  ACE_Time_Value *time = 0;
  if (this->owner_ == ACE_Thread::self ())
    time = this->timer_queue_->calculate_timeout (max_wait_time);
  else
    time = max_wait_time;

  if (time == 0)
    return INFINITE;
  else
    return time->msec ();
}

int
ACE_WFMO_Reactor::expire_timers (void)
{
  // If "owner" thread
  if (ACE_Thread::self () == this->owner_)
    // expire all pending timers.
    return this->timer_queue_->expire ();
  else
    // Nothing to expire
    return 0;
}

// Translate a WFMO wait status into dispatch actions.  Timers always
// get a chance to expire first; the count of handlers dispatched
// (timers plus I/O/event handlers) is returned, or -1 on failure.
int
ACE_WFMO_Reactor::dispatch (DWORD wait_status)
{
  // Expire timers
  int handlers_dispatched = this->expire_timers ();

  switch (wait_status)
    {
    case WAIT_FAILED: // Failure.
      ACE_OS::set_errno_to_last_error ();
      return -1;

    case WAIT_TIMEOUT: // Timeout.
      errno = ETIME;
      return handlers_dispatched;

#ifndef ACE_HAS_WINCE
    case WAIT_IO_COMPLETION: // APC.
      return handlers_dispatched;
#endif  // ACE_HAS_WINCE

    default:  // Dispatch.
      // We'll let dispatch worry about abandoned mutexes.
      handlers_dispatched += this->dispatch_handles (wait_status);
      return handlers_dispatched;
    }
}

// Dispatches any active handles from <handles_[slot]> to
// <handles_[max_handlep1_]>, polling through our handle set looking
// for active handles.
int
ACE_WFMO_Reactor::dispatch_handles (DWORD wait_status)
{
  // dispatch_slot is the absolute slot.  Only += is used to
  // increment it.
  DWORD dispatch_slot = 0;

  // Cache this value, this is the absolute value.
  DWORD const max_handlep1 = this->handler_rep_.max_handlep1 ();

  // nCount starts off at <max_handlep1>, this is a transient count of
  // handles last waited on.
  DWORD nCount = max_handlep1;

  for (int number_of_handlers_dispatched = 1;
       ;
       ++number_of_handlers_dispatched)
    {
      // Distinguish a signaled handle (WAIT_OBJECT_0 range) from an
      // abandoned mutex (WAIT_ABANDONED_0 range).
      const bool ok =
        (
#if ! defined(__BORLANDC__) \
    && !defined (__MINGW32__) \
    && !defined (_MSC_VER)
          // wait_status is unsigned in Borland, Green Hills,
          // mingw32 and MSVC++
          // This >= is always true, with a warning.
          wait_status >= WAIT_OBJECT_0 &&
#endif
          wait_status <= (WAIT_OBJECT_0 + nCount));

      if (ok)
        dispatch_slot += wait_status - WAIT_OBJECT_0;
      else
        // Otherwise, a handle was abandoned.
        dispatch_slot += wait_status - WAIT_ABANDONED_0;

      // Dispatch handler
      if (this->dispatch_handler (dispatch_slot, max_handlep1) == -1)
        return -1;

      // Increment slot
      ++dispatch_slot;

      // We're done.
      if (dispatch_slot >= max_handlep1)
        return number_of_handlers_dispatched;

      // Readjust nCount
      nCount = max_handlep1 - dispatch_slot;

      // Check the remaining handles
      wait_status = this->poll_remaining_handles (dispatch_slot);
      switch (wait_status)
        {
        case WAIT_FAILED: // Failure.
          ACE_OS::set_errno_to_last_error ();
          /* FALLTHRU */
        case WAIT_TIMEOUT:
          // There are no more handles ready, we can return.
          return number_of_handlers_dispatched;
        }
    }
}

// Route the signaled <slot> to the right dispatcher: window messages
// (the slot just past the handle set), the complex I/O path, or the
// simple event path.
int
ACE_WFMO_Reactor::dispatch_handler (DWORD slot,
                                    DWORD max_handlep1)
{
  // Check if there are window messages that need to be dispatched
  if (slot == max_handlep1)
    return this->dispatch_window_messages ();

  // Dispatch the handler if it has not been scheduled for deletion.
  // Note that this is a very weak test if there are multiple threads
  // dispatching this slot as no locks are held here. Generally, you
  // do not want to do something like deleting the this pointer in
  // handle_close() if you have registered multiple times and there is
  // more than one thread in WFMO_Reactor->handle_events().
  else if (!this->handler_rep_.scheduled_for_deletion (slot))
    {
      ACE_HANDLE event_handle = *(this->handler_rep_.handles () + slot);

      if (this->handler_rep_.current_info ()[slot].io_entry_)
        return this->complex_dispatch_handler (slot,
                                               event_handle);
      else
        return this->simple_dispatch_handler (slot,
                                              event_handle);
    }
  else
    // The handle was scheduled for deletion, so we will skip it.
    return 0;
}

int
ACE_WFMO_Reactor::simple_dispatch_handler (DWORD slot,
                                           ACE_HANDLE event_handle)
{
  // This dispatch is used for non-I/O entries

  // Assign the ``signaled'' HANDLE so that callers can get it.
  // siginfo_t is an ACE - specific fabrication. Constructor exists.
  siginfo_t sig (event_handle);

  ACE_Event_Handler *event_handler =
    this->handler_rep_.current_info ()[slot].event_handler_;

  // Honor the handler's reference-counting policy across the upcall.
  int requires_reference_counting =
    event_handler->reference_counting_policy ().value () ==
    ACE_Event_Handler::Reference_Counting_Policy::ENABLED;

  if (requires_reference_counting)
    {
      event_handler->add_reference ();
    }

  // Upcall; a -1 return unbinds the handler from the reactor.
  if (event_handler->handle_signal (0, &sig) == -1)
    this->handler_rep_.unbind (event_handle,
                               ACE_Event_Handler::NULL_MASK);

  // Call remove_reference() if needed.
  if (requires_reference_counting)
    {
      event_handler->remove_reference ();
    }

  return 0;
}

int
ACE_WFMO_Reactor::complex_dispatch_handler (DWORD slot,
                                            ACE_HANDLE event_handle)
{
  // This dispatch is used for I/O entries.

  ACE_WFMO_Reactor_Handler_Repository::Current_Info &current_info =
    this->handler_rep_.current_info ()[slot];

  WSANETWORKEVENTS events;
  ACE_Reactor_Mask problems = ACE_Event_Handler::NULL_MASK;
  if (::WSAEnumNetworkEvents ((SOCKET) current_info.io_handle_,
                              event_handle,
                              &events) == SOCKET_ERROR)
    problems = ACE_Event_Handler::ALL_EVENTS_MASK;
  else
    {
      // Prepare for upcalls. Clear the bits from <events> representing
      // events the handler is not interested in. If there are any left,
      // do the upcall(s). upcall will replace events.lNetworkEvents
      // with bits representing any functions that requested a repeat
      // callback before checking handles again. In this case, continue
      // to call back unless the handler is unregistered as a result of
      // one of the upcalls. The way this is written, the upcalls will
      // keep being done even if one or more upcalls reported problems.
      // In practice this may turn out not so good, but let's see. If any
      // problems, please notify Steve Huston <shuston@riverace.com>
      // before or after you change this code.
      events.lNetworkEvents &= current_info.network_events_;
      while (events.lNetworkEvents != 0)
        {
          ACE_Event_Handler *event_handler =
            current_info.event_handler_;

          int reference_counting_required =
            event_handler->reference_counting_policy ().value () ==
            ACE_Event_Handler::Reference_Counting_Policy::ENABLED;

          // Call add_reference() if needed.
          if (reference_counting_required)
            {
              event_handler->add_reference ();
            }

          // Upcall
          problems |= this->upcall (current_info.event_handler_,
                                    current_info.io_handle_,
                                    events);

          // Call remove_reference() if needed.
          if (reference_counting_required)
            {
              event_handler->remove_reference ();
            }

          // Stop repeating callbacks once the handler has been
          // unregistered by one of the upcalls.
          if (this->handler_rep_.scheduled_for_deletion (slot))
            break;
        }
    }

  // Unbind only for the masks that reported problems, and only if the
  // entry is still live.
  if (problems != ACE_Event_Handler::NULL_MASK
      && !this->handler_rep_.scheduled_for_deletion (slot))
    this->handler_rep_.unbind (event_handle, problems);

  return 0;
}

ACE_Reactor_Mask
ACE_WFMO_Reactor::upcall (ACE_Event_Handler *event_handler,
                          ACE_HANDLE io_handle,
                          WSANETWORKEVENTS &events)
{
  // This method figures out what exactly has happened to the socket
  // and then calls appropriate methods.
  ACE_Reactor_Mask problems = ACE_Event_Handler::NULL_MASK;

  // Go through the events and do the indicated upcalls. If the handler
  // doesn't want to be called back, clear the bit for that event.
  // At the end, set the bits back to <events> to request a repeat call.
  long actual_events = events.lNetworkEvents;
  int action;

  // Each branch below: an upcall return of 0 clears the event bit
  // (no repeat callback wanted); -1 additionally records the mask in
  // <problems> so the caller can unbind the handler.
  if (ACE_BIT_ENABLED (actual_events, FD_WRITE))
    {
      action = event_handler->handle_output (io_handle);
      if (action <= 0)
        {
          ACE_CLR_BITS (actual_events, FD_WRITE);
          if (action == -1)
            ACE_SET_BITS (problems, ACE_Event_Handler::WRITE_MASK);
        }
    }

  if (ACE_BIT_ENABLED (actual_events, FD_CONNECT))
    {
      if (events.iErrorCode[FD_CONNECT_BIT] == 0)
        {
          // Successful connect
          action = event_handler->handle_output (io_handle);
          if (action <= 0)
            {
              ACE_CLR_BITS (actual_events, FD_CONNECT);
              if (action == -1)
                ACE_SET_BITS (problems,
                              ACE_Event_Handler::CONNECT_MASK);
            }
        }
      // Unsuccessful connect
      else
        {
          action = event_handler->handle_input (io_handle);
          if (action <= 0)
            {
              ACE_CLR_BITS (actual_events, FD_CONNECT);
              if (action == -1)
                ACE_SET_BITS (problems,
                              ACE_Event_Handler::CONNECT_MASK);
            }
        }
    }

  if (ACE_BIT_ENABLED (actual_events, FD_OOB))
    {
      action = event_handler->handle_exception (io_handle);
      if (action <= 0)
        {
          ACE_CLR_BITS (actual_events, FD_OOB);
          if (action == -1)
            ACE_SET_BITS (problems, ACE_Event_Handler::EXCEPT_MASK);
        }
    }

  if (ACE_BIT_ENABLED (actual_events, FD_READ))
    {
      action = event_handler->handle_input (io_handle);
      if (action <= 0)
        {
          ACE_CLR_BITS (actual_events, FD_READ);
          if (action == -1)
            ACE_SET_BITS (problems, ACE_Event_Handler::READ_MASK);
        }
    }

  // FD_CLOSE is skipped when FD_READ already failed, so the handler
  // isn't told twice about the same dead connection.
  if (ACE_BIT_ENABLED (actual_events, FD_CLOSE)
      && ACE_BIT_DISABLED (problems, ACE_Event_Handler::READ_MASK))
    {
      action = event_handler->handle_input (io_handle);
      if (action <= 0)
        {
          ACE_CLR_BITS (actual_events, FD_CLOSE);
          if (action == -1)
            ACE_SET_BITS (problems, ACE_Event_Handler::READ_MASK);
        }
    }

  if (ACE_BIT_ENABLED (actual_events, FD_ACCEPT))
    {
      action = event_handler->handle_input (io_handle);
      if (action <= 0)
        {
          ACE_CLR_BITS (actual_events, FD_ACCEPT);
          if (action == -1)
            ACE_SET_BITS (problems, ACE_Event_Handler::ACCEPT_MASK);
        }
    }

  if (ACE_BIT_ENABLED (actual_events, FD_QOS))
    {
      action = event_handler->handle_qos (io_handle);
      if (action <= 0)
        {
          ACE_CLR_BITS (actual_events, FD_QOS);
          if (action == -1)
            ACE_SET_BITS (problems, ACE_Event_Handler::QOS_MASK);
        }
    }

  if (ACE_BIT_ENABLED (actual_events, FD_GROUP_QOS))
    {
      action = event_handler->handle_group_qos (io_handle);
      if (action <= 0)
        {
          ACE_CLR_BITS (actual_events, FD_GROUP_QOS);
          if (action == -1)
            ACE_SET_BITS (problems,
                          ACE_Event_Handler::GROUP_QOS_MASK);
        }
    }

  // Report back which events requested a repeat callback.
  events.lNetworkEvents = actual_events;
  return problems;
}

// Called as a thread leaves the event loop: decrement the active
// count and, if repository changes or an ownership change are
// pending, elect one thread to quiesce the others and apply them.
int
ACE_WFMO_Reactor::update_state (void)
{
  // This GUARD is necessary since we are updating shared state.
  ACE_GUARD_RETURN (ACE_Process_Mutex, monitor, this->lock_, -1);

  // Decrement active threads
  --this->active_threads_;

  // Check if the state of the handler repository has changed or new
  // owner has to be set
  if (this->handler_rep_.changes_required () || this->new_owner ())
    {
      if (this->change_state_thread_ == 0)
        // Try to become the thread which will be responsible for the
        // changes
        {
          this->change_state_thread_ = ACE_Thread::self ();
          // Make sure no new threads are allowed to enter
          this->ok_to_wait_.reset ();

          if (this->active_threads_ > 0)
            // Check for other active threads
            {
              // Wake up all other threads
              this->wakeup_all_threads_.signal ();
              // Release <lock_>
              monitor.release ();
              // Go to sleep waiting for all other threads to get done
              this->waiting_to_change_state_.wait ();
              // Re-acquire <lock_> again
              monitor.acquire ();
            }

          // Note that make_changes() calls into user code which can
          // request other changes.  So keep looping until all
          // requested changes are completed.
          while (this->handler_rep_.changes_required ())
            // Make necessary changes to the handler repository
            this->handler_rep_.make_changes ();
          if (this->new_owner ())
            // Update the owner
            this->change_owner ();
          // Turn off <wakeup_all_threads_>
          this->wakeup_all_threads_.reset ();
          // Let everyone know that it is ok to go ahead
          this->ok_to_wait_.signal ();
          // Reset this flag
          this->change_state_thread_ = 0;
        }
      else if (this->active_threads_ == 0)
        // This thread did not get a chance to become the change
        // thread.  If it is the last one out, it will wakeup the
        // change thread
        this->waiting_to_change_state_.signal ();
    }
  // This is if we were woken up explicitly by the user and there are
  // no state changes required.
  else if (this->active_threads_ == 0)
    // Turn off <wakeup_all_threads_>
    this->wakeup_all_threads_.reset ();

  return 0;
}

// Debug dump of reactor state; compiled out unless ACE_HAS_DUMP.
void
ACE_WFMO_Reactor::dump (void) const
{
#if defined (ACE_HAS_DUMP)
  ACE_TRACE ("ACE_WFMO_Reactor::dump");

  ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));

  ACE_DEBUG ((LM_DEBUG,
              ACE_TEXT ("Count of currently active threads = %d\n"),
              this->active_threads_));

  ACE_DEBUG ((LM_DEBUG,
              ACE_TEXT ("ID of owner thread = %d\n"),
              this->owner_));

  this->handler_rep_.dump ();
  this->signal_handler_->dump ();
  this->timer_queue_->dump ();

  ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}

// The following ACE_WFMO_Reactor_Notify methods are stubs for parts
// of the notification interface that this reactor does not use.
int
ACE_WFMO_Reactor_Notify::dispatch_notifications (int & /*number_of_active_handles*/,
                                                 ACE_Handle_Set & /*rd_mask*/)
{
  return -1;
}

int
ACE_WFMO_Reactor_Notify::is_dispatchable (ACE_Notification_Buffer & /*buffer*/)
{
  return 0;
}

ACE_HANDLE
ACE_WFMO_Reactor_Notify::notify_handle (void)
{
  return ACE_INVALID_HANDLE;
}

int
ACE_WFMO_Reactor_Notify::read_notify_pipe (ACE_HANDLE ,
                                           ACE_Notification_Buffer &)
{
  return 0;
}

int
ACE_WFMO_Reactor_Notify::dispatch_notify (ACE_Notification_Buffer &)
{
  return 0;
}

int
ACE_WFMO_Reactor_Notify::close (void)
{
  return -1;
}

// Size the notification message queue to hold <max_notifies> buffers;
// -1 iterations means "drain the whole queue" in handle_signal().
ACE_WFMO_Reactor_Notify::ACE_WFMO_Reactor_Notify (size_t max_notifies)
  : timer_queue_ (0),
    message_queue_ (max_notifies * sizeof (ACE_Notification_Buffer),
                    max_notifies * sizeof (ACE_Notification_Buffer)),
    max_notify_iterations_ (-1)
{
}

// Hook this notifier up to its reactor so the reactor waits on
// <wakeup_one_thread_> along with the other handles.
int
ACE_WFMO_Reactor_Notify::open (ACE_Reactor_Impl *wfmo_reactor,
                               ACE_Timer_Queue *timer_queue,
                               int ignore_notify)
{
  ACE_UNUSED_ARG (ignore_notify);
  timer_queue_ = timer_queue;
  return wfmo_reactor->register_handler (this);
}

ACE_HANDLE
ACE_WFMO_Reactor_Notify::get_handle (void) const
{
  return this->wakeup_one_thread_.handle ();
}

// Handle all pending notifications.
// Drain the notification queue when <wakeup_one_thread_> fires,
// dispatching the handle_* method recorded in each buffer's mask and
// releasing each message block.  Stops after
// <max_notify_iterations_> dispatches (re-signaling itself if work
// remains) or when the queue is empty.
int
ACE_WFMO_Reactor_Notify::handle_signal (int signum,
                                        siginfo_t *siginfo,
                                        ucontext_t *)
{
  ACE_UNUSED_ARG (signum);

  // Just check for sanity...
  if (siginfo->si_handle_ != this->wakeup_one_thread_.handle ())
    return -1;

  // This will get called when <WFMO_Reactor->wakeup_one_thread_> event
  // is signaled.
  //   ACE_DEBUG ((LM_DEBUG,
  //               ACE_TEXT ("(%t) waking up to handle internal notifications\n")));

  for (int i = 1; ; ++i)
    {
      ACE_Message_Block *mb = 0;
      // Copy ACE_Time_Value::zero since dequeue_head will modify it.
      ACE_Time_Value zero_timeout (ACE_Time_Value::zero);
      if (this->message_queue_.dequeue_head (mb, &zero_timeout) == -1)
        {
          if (errno == EWOULDBLOCK)
            // We've reached the end of the processing, return
            // normally.
            return 0;
          else
            return -1; // Something weird happened...
        }
      else
        {
          ACE_Notification_Buffer *buffer =
            reinterpret_cast <ACE_Notification_Buffer *> (mb->base ());

          // If eh == 0 then we've got major problems!  Otherwise, we
          // need to dispatch the appropriate handle_* method on the
          // ACE_Event_Handler pointer we've been passed.
          if (buffer->eh_ != 0)
            {
              ACE_Event_Handler *event_handler = buffer->eh_;

              bool const requires_reference_counting =
                event_handler->reference_counting_policy ().value () ==
                ACE_Event_Handler::Reference_Counting_Policy::ENABLED;

              int result = 0;

              switch (buffer->mask_)
                {
                case ACE_Event_Handler::READ_MASK:
                case ACE_Event_Handler::ACCEPT_MASK:
                  result = event_handler->handle_input (ACE_INVALID_HANDLE);
                  break;
                case ACE_Event_Handler::WRITE_MASK:
                  result = event_handler->handle_output (ACE_INVALID_HANDLE);
                  break;
                case ACE_Event_Handler::EXCEPT_MASK:
                  result = event_handler->handle_exception (ACE_INVALID_HANDLE);
                  break;
                case ACE_Event_Handler::QOS_MASK:
                  result = event_handler->handle_qos (ACE_INVALID_HANDLE);
                  break;
                case ACE_Event_Handler::GROUP_QOS_MASK:
                  result = event_handler->handle_group_qos (ACE_INVALID_HANDLE);
                  break;
                default:
                  ACE_ERROR ((LM_ERROR,
                              ACE_TEXT ("invalid mask = %d\n"),
                              buffer->mask_));
                  break;
                }

              // A failed upcall closes the handler out.
              if (result == -1)
                event_handler->handle_close (ACE_INVALID_HANDLE,
                                             ACE_Event_Handler::EXCEPT_MASK);

              // Drop the reference taken in notify().
              if (requires_reference_counting)
                {
                  event_handler->remove_reference ();
                }
            }

          // Make sure to delete the memory regardless of success or
          // failure!
          mb->release ();

          // Bail out if we've reached the <max_notify_iterations_>.
          // Note that by default <max_notify_iterations_> is -1, so
          // we'll loop until we're done.
          if (i == this->max_notify_iterations_)
            {
              // If there are still notification in the queue, we need
              // to wake up again
              if (!this->message_queue_.is_empty ())
                this->wakeup_one_thread_.signal ();

              // Break the loop as we have reached max_notify_iterations_
              return 0;
            }
        }
    }
}

// Notify the WFMO_Reactor, potentially enqueueing the
// <ACE_Event_Handler> for subsequent processing in the WFMO_Reactor
// thread of control.
int ACE_WFMO_Reactor_Notify::notify (ACE_Event_Handler *event_handler, ACE_Reactor_Mask mask, ACE_Time_Value *timeout) { if (event_handler != 0) { ACE_Message_Block *mb = 0; ACE_NEW_RETURN (mb, ACE_Message_Block (sizeof (ACE_Notification_Buffer)), -1); ACE_Notification_Buffer *buffer = (ACE_Notification_Buffer *) mb->base (); buffer->eh_ = event_handler; buffer->mask_ = mask; // Convert from relative time to absolute time by adding the // current time of day. This is what <ACE_Message_Queue> // expects. if (timeout != 0) *timeout += timer_queue_->gettimeofday (); if (this->message_queue_.enqueue_tail (mb, timeout) == -1) { mb->release (); return -1; } event_handler->add_reference (); } return this->wakeup_one_thread_.signal (); } void ACE_WFMO_Reactor_Notify::max_notify_iterations (int iterations) { ACE_TRACE ("ACE_WFMO_Reactor_Notify::max_notify_iterations"); // Must always be > 0 or < 0 to optimize the loop exit condition. if (iterations == 0) iterations = 1; this->max_notify_iterations_ = iterations; } int ACE_WFMO_Reactor_Notify::max_notify_iterations (void) { ACE_TRACE ("ACE_WFMO_Reactor_Notify::max_notify_iterations"); return this->max_notify_iterations_; } int ACE_WFMO_Reactor_Notify::purge_pending_notifications (ACE_Event_Handler *eh, ACE_Reactor_Mask mask) { ACE_TRACE ("ACE_WFMO_Reactor_Notify::purge_pending_notifications"); // Go over message queue and take out all the matching event // handlers. If eh == 0, purge all. Note that reactor notifies (no // handler specified) are never purged, as this may lose a needed // notify the reactor queued for itself. if (this->message_queue_.is_empty ()) return 0; // Guard against new and/or delivered notifications while purging. // WARNING!!! The use of the notification queue's lock object for // this guard makes use of the knowledge that on Win32, the mutex // protecting the queue is really a CriticalSection, which is // recursive. 
This is how we can get away with locking it down here // and still calling member functions on the queue object. ACE_GUARD_RETURN (ACE_SYNCH_MUTEX, monitor, this->message_queue_.lock(), -1); // first, copy all to our own local queue. Since we've locked everyone out // of here, there's no need to use any synchronization on this queue. ACE_Message_Queue<ACE_NULL_SYNCH> local_queue; size_t queue_size = this->message_queue_.message_count (); int number_purged = 0; size_t index; for (index = 0; index < queue_size; ++index) { ACE_Message_Block *mb = 0; if (-1 == this->message_queue_.dequeue_head (mb)) return -1; // This shouldn't happen... ACE_Notification_Buffer *buffer = reinterpret_cast<ACE_Notification_Buffer *> (mb->base ()); // If this is not a Reactor notify (it is for a particular handler), // and it matches the specified handler (or purging all), // and applying the mask would totally eliminate the notification, then // release it and count the number purged. if ((0 != buffer->eh_) && (0 == eh || eh == buffer->eh_) && ACE_BIT_DISABLED (buffer->mask_, ~mask)) // the existing notification mask // is left with nothing when // applying the mask { ACE_Event_Handler *event_handler = buffer->eh_; event_handler->remove_reference (); mb->release (); ++number_purged; } else { // To preserve it, move it to the local_queue. But first, if // this is not a Reactor notify (it is for a // particularhandler), and it matches the specified handler // (or purging all), then apply the mask if ((0 != buffer->eh_) && (0 == eh || eh == buffer->eh_)) ACE_CLR_BITS(buffer->mask_, mask); if (-1 == local_queue.enqueue_head (mb)) return -1; } } if (this->message_queue_.message_count ()) { // Should be empty! ACE_ASSERT (0); return -1; } // Now copy back from the local queue to the class queue, taking // care to preserve the original order... 
queue_size = local_queue.message_count (); for (index = 0; index < queue_size; ++index) { ACE_Message_Block *mb = 0; if (-1 == local_queue.dequeue_head (mb)) { ACE_ASSERT (0); return -1; } if (-1 == this->message_queue_.enqueue_head (mb)) { ACE_ASSERT (0); return -1; } } return number_purged; } void ACE_WFMO_Reactor_Notify::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_WFMO_Reactor_Notify::dump"); ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this)); this->timer_queue_->dump (); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("Max. iteration: %d\n"), this->max_notify_iterations_)); ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP)); #endif /* ACE_HAS_DUMP */ } void ACE_WFMO_Reactor::max_notify_iterations (int iterations) { ACE_TRACE ("ACE_WFMO_Reactor::max_notify_iterations"); ACE_GUARD (ACE_Process_Mutex, monitor, this->lock_); // Must always be > 0 or < 0 to optimize the loop exit condition. this->notify_handler_->max_notify_iterations (iterations); } int ACE_WFMO_Reactor::max_notify_iterations (void) { ACE_TRACE ("ACE_WFMO_Reactor::max_notify_iterations"); ACE_GUARD_RETURN (ACE_Process_Mutex, monitor, this->lock_, -1); return this->notify_handler_->max_notify_iterations (); } int ACE_WFMO_Reactor::purge_pending_notifications (ACE_Event_Handler *eh, ACE_Reactor_Mask mask) { ACE_TRACE ("ACE_WFMO_Reactor::purge_pending_notifications"); if (this->notify_handler_ == 0) return 0; else return this->notify_handler_->purge_pending_notifications (eh, mask); } int ACE_WFMO_Reactor::resumable_handler (void) { ACE_TRACE ("ACE_WFMO_Reactor::resumable_handler"); return 0; } // No-op WinSOCK2 methods to help WFMO_Reactor compile #if !defined (ACE_HAS_WINSOCK2) || (ACE_HAS_WINSOCK2 == 0) int WSAEventSelect (SOCKET /* s */, WSAEVENT /* hEventObject */, long /* lNetworkEvents */) { return -1; } int WSAEnumNetworkEvents (SOCKET /* s */, WSAEVENT /* hEventObject */, LPWSANETWORKEVENTS /* lpNetworkEvents */) { return -1; } #endif /* !defined ACE_HAS_WINSOCK2 */ ACE_END_VERSIONED_NAMESPACE_DECL #endif /* 
ACE_WIN32 */
gpl-2.0
ISTweak/android_kernel_sharp_is03
drivers/i2c/busses/i2c-imx.c
519
18122
/* * Copyright (C) 2002 Motorola GSG-China * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. * * Author: * Darius Augulis, Teltonika Inc. * * Desc.: * Implementation of I2C Adapter/Algorithm Driver * for I2C Bus integrated in Freescale i.MX/MXC processors * * Derived from Motorola GSG China I2C example driver * * Copyright (C) 2005 Torsten Koschorrek <koschorrek at synertronixx.de * Copyright (C) 2005 Matthias Blaschke <blaschke at synertronixx.de * Copyright (C) 2007 RightHand Technologies, Inc. 
* Copyright (C) 2008 Darius Augulis <darius.augulis at teltonika.lt> * */ /** Includes ******************************************************************* *******************************************************************************/ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/io.h> #include <linux/sched.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <mach/irqs.h> #include <mach/hardware.h> #include <mach/i2c.h> /** Defines ******************************************************************** *******************************************************************************/ /* This will be the driver name the kernel reports */ #define DRIVER_NAME "imx-i2c" /* Default value */ #define IMX_I2C_BIT_RATE 100000 /* 100kHz */ /* IMX I2C registers */ #define IMX_I2C_IADR 0x00 /* i2c slave address */ #define IMX_I2C_IFDR 0x04 /* i2c frequency divider */ #define IMX_I2C_I2CR 0x08 /* i2c control */ #define IMX_I2C_I2SR 0x0C /* i2c status */ #define IMX_I2C_I2DR 0x10 /* i2c transfer data */ /* Bits of IMX I2C registers */ #define I2SR_RXAK 0x01 #define I2SR_IIF 0x02 #define I2SR_SRW 0x04 #define I2SR_IAL 0x10 #define I2SR_IBB 0x20 #define I2SR_IAAS 0x40 #define I2SR_ICF 0x80 #define I2CR_RSTA 0x04 #define I2CR_TXAK 0x08 #define I2CR_MTX 0x10 #define I2CR_MSTA 0x20 #define I2CR_IIEN 0x40 #define I2CR_IEN 0x80 /** Variables ****************************************************************** *******************************************************************************/ /* * sorted list of clock divider, register value pairs * taken from table 26-5, p.26-9, Freescale i.MX * Integrated Portable System Processor Reference Manual * Document Number: MC9328MXLRM, Rev. 
5.1, 06/2007 * * Duplicated divider values removed from list */ static u16 __initdata i2c_clk_div[50][2] = { { 22, 0x20 }, { 24, 0x21 }, { 26, 0x22 }, { 28, 0x23 }, { 30, 0x00 }, { 32, 0x24 }, { 36, 0x25 }, { 40, 0x26 }, { 42, 0x03 }, { 44, 0x27 }, { 48, 0x28 }, { 52, 0x05 }, { 56, 0x29 }, { 60, 0x06 }, { 64, 0x2A }, { 72, 0x2B }, { 80, 0x2C }, { 88, 0x09 }, { 96, 0x2D }, { 104, 0x0A }, { 112, 0x2E }, { 128, 0x2F }, { 144, 0x0C }, { 160, 0x30 }, { 192, 0x31 }, { 224, 0x32 }, { 240, 0x0F }, { 256, 0x33 }, { 288, 0x10 }, { 320, 0x34 }, { 384, 0x35 }, { 448, 0x36 }, { 480, 0x13 }, { 512, 0x37 }, { 576, 0x14 }, { 640, 0x38 }, { 768, 0x39 }, { 896, 0x3A }, { 960, 0x17 }, { 1024, 0x3B }, { 1152, 0x18 }, { 1280, 0x3C }, { 1536, 0x3D }, { 1792, 0x3E }, { 1920, 0x1B }, { 2048, 0x3F }, { 2304, 0x1C }, { 2560, 0x1D }, { 3072, 0x1E }, { 3840, 0x1F } }; struct imx_i2c_struct { struct i2c_adapter adapter; struct resource *res; struct clk *clk; void __iomem *base; int irq; wait_queue_head_t queue; unsigned long i2csr; unsigned int disable_delay; int stopped; unsigned int ifdr; /* IMX_I2C_IFDR */ }; /** Functions for IMX I2C adapter driver *************************************** *******************************************************************************/ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) { unsigned long orig_jiffies = jiffies; unsigned int temp; dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); while (1) { temp = readb(i2c_imx->base + IMX_I2C_I2SR); if (for_busy && (temp & I2SR_IBB)) break; if (!for_busy && !(temp & I2SR_IBB)) break; if (signal_pending(current)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C Interrupted\n", __func__); return -EINTR; } if (time_after(jiffies, orig_jiffies + HZ / 1000)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C bus is busy\n", __func__); return -EIO; } schedule(); } return 0; } static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx) { int result; result = 
wait_event_interruptible_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10); if (unlikely(result < 0)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> result < 0\n", __func__); return result; } else if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) { dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); return -ETIMEDOUT; } dev_dbg(&i2c_imx->adapter.dev, "<%s> TRX complete\n", __func__); i2c_imx->i2csr = 0; return 0; } static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx) { if (readb(i2c_imx->base + IMX_I2C_I2SR) & I2SR_RXAK) { dev_dbg(&i2c_imx->adapter.dev, "<%s> No ACK\n", __func__); return -EIO; /* No ACK */ } dev_dbg(&i2c_imx->adapter.dev, "<%s> ACK received\n", __func__); return 0; } static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) { unsigned int temp = 0; int result; dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); clk_enable(i2c_imx->clk); writeb(i2c_imx->ifdr, i2c_imx->base + IMX_I2C_IFDR); /* Enable I2C controller */ writeb(0, i2c_imx->base + IMX_I2C_I2SR); writeb(I2CR_IEN, i2c_imx->base + IMX_I2C_I2CR); /* Wait controller to be stable */ udelay(50); /* Start I2C transaction */ temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp |= I2CR_MSTA; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); result = i2c_imx_bus_busy(i2c_imx, 1); if (result) return result; i2c_imx->stopped = 0; temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); return result; } static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) { unsigned int temp = 0; if (!i2c_imx->stopped) { /* Stop I2C transaction */ dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp &= ~(I2CR_MSTA | I2CR_MTX); writeb(temp, i2c_imx->base + IMX_I2C_I2CR); i2c_imx->stopped = 1; } if (cpu_is_mx1()) { /* * This delay caused by an i.MXL hardware bug. * If no (or too short) delay, no "STOP" bit will be generated. 
*/ udelay(i2c_imx->disable_delay); } if (!i2c_imx->stopped) i2c_imx_bus_busy(i2c_imx, 0); /* Disable I2C controller */ writeb(0, i2c_imx->base + IMX_I2C_I2CR); clk_disable(i2c_imx->clk); } static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, unsigned int rate) { unsigned int i2c_clk_rate; unsigned int div; int i; /* Divider value calculation */ i2c_clk_rate = clk_get_rate(i2c_imx->clk); div = (i2c_clk_rate + rate - 1) / rate; if (div < i2c_clk_div[0][0]) i = 0; else if (div > i2c_clk_div[ARRAY_SIZE(i2c_clk_div) - 1][0]) i = ARRAY_SIZE(i2c_clk_div) - 1; else for (i = 0; i2c_clk_div[i][0] < div; i++); /* Store divider value */ i2c_imx->ifdr = i2c_clk_div[i][1]; /* * There dummy delay is calculated. * It should be about one I2C clock period long. * This delay is used in I2C bus disable function * to fix chip hardware bug. */ i2c_imx->disable_delay = (500000U * i2c_clk_div[i][0] + (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2); /* dev_dbg() can't be used, because adapter is not yet registered */ #ifdef CONFIG_I2C_DEBUG_BUS printk(KERN_DEBUG "I2C: <%s> I2C_CLK=%d, REQ DIV=%d\n", __func__, i2c_clk_rate, div); printk(KERN_DEBUG "I2C: <%s> IFDR[IC]=0x%x, REAL DIV=%d\n", __func__, i2c_clk_div[i][1], i2c_clk_div[i][0]); #endif } static irqreturn_t i2c_imx_isr(int irq, void *dev_id) { struct imx_i2c_struct *i2c_imx = dev_id; unsigned int temp; temp = readb(i2c_imx->base + IMX_I2C_I2SR); if (temp & I2SR_IIF) { /* save status register */ i2c_imx->i2csr = temp; temp &= ~I2SR_IIF; writeb(temp, i2c_imx->base + IMX_I2C_I2SR); wake_up_interruptible(&i2c_imx->queue); return IRQ_HANDLED; } return IRQ_NONE; } static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) { int i, result; dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", __func__, msgs->addr << 1); /* write slave address */ writeb(msgs->addr << 1, i2c_imx->base + IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx); if (result) return result; result = 
i2c_imx_acked(i2c_imx); if (result) return result; dev_dbg(&i2c_imx->adapter.dev, "<%s> write data\n", __func__); /* write data */ for (i = 0; i < msgs->len; i++) { dev_dbg(&i2c_imx->adapter.dev, "<%s> write byte: B%d=0x%X\n", __func__, i, msgs->buf[i]); writeb(msgs->buf[i], i2c_imx->base + IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx); if (result) return result; result = i2c_imx_acked(i2c_imx); if (result) return result; } return 0; } static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) { int i, result; unsigned int temp; dev_dbg(&i2c_imx->adapter.dev, "<%s> write slave address: addr=0x%x\n", __func__, (msgs->addr << 1) | 0x01); /* write slave address */ writeb((msgs->addr << 1) | 0x01, i2c_imx->base + IMX_I2C_I2DR); result = i2c_imx_trx_complete(i2c_imx); if (result) return result; result = i2c_imx_acked(i2c_imx); if (result) return result; dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__); /* setup bus to read data */ temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp &= ~I2CR_MTX; if (msgs->len - 1) temp &= ~I2CR_TXAK; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); readb(i2c_imx->base + IMX_I2C_I2DR); /* dummy read */ dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); /* read data */ for (i = 0; i < msgs->len; i++) { result = i2c_imx_trx_complete(i2c_imx); if (result) return result; if (i == (msgs->len - 1)) { /* It must generate STOP before read I2DR to prevent controller from generating another clock cycle */ dev_dbg(&i2c_imx->adapter.dev, "<%s> clear MSTA\n", __func__); temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp &= ~(I2CR_MSTA | I2CR_MTX); writeb(temp, i2c_imx->base + IMX_I2C_I2CR); i2c_imx_bus_busy(i2c_imx, 0); i2c_imx->stopped = 1; } else if (i == (msgs->len - 2)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> set TXAK\n", __func__); temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp |= I2CR_TXAK; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); } msgs->buf[i] = readb(i2c_imx->base + IMX_I2C_I2DR); 
dev_dbg(&i2c_imx->adapter.dev, "<%s> read byte: B%d=0x%X\n", __func__, i, msgs->buf[i]); } return 0; } static int i2c_imx_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { unsigned int i, temp; int result; struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter); dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); /* Start I2C transfer */ result = i2c_imx_start(i2c_imx); if (result) goto fail0; /* read/write data */ for (i = 0; i < num; i++) { if (i) { dev_dbg(&i2c_imx->adapter.dev, "<%s> repeated start\n", __func__); temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp |= I2CR_RSTA; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); result = i2c_imx_bus_busy(i2c_imx, 1); if (result) goto fail0; } dev_dbg(&i2c_imx->adapter.dev, "<%s> transfer message: %d\n", __func__, i); /* write/read data */ #ifdef CONFIG_I2C_DEBUG_BUS temp = readb(i2c_imx->base + IMX_I2C_I2CR); dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, " "MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__, (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0), (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0), (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0)); temp = readb(i2c_imx->base + IMX_I2C_I2SR); dev_dbg(&i2c_imx->adapter.dev, "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, " "IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__, (temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0), (temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0), (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0), (temp & I2SR_RXAK ? 1 : 0)); #endif if (msgs[i].flags & I2C_M_RD) result = i2c_imx_read(i2c_imx, &msgs[i]); else result = i2c_imx_write(i2c_imx, &msgs[i]); } fail0: /* Stop I2C transfer */ i2c_imx_stop(i2c_imx); dev_dbg(&i2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__, (result < 0) ? "error" : "success msg", (result < 0) ? result : num); return (result < 0) ? 
result : num; } static u32 i2c_imx_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm i2c_imx_algo = { .master_xfer = i2c_imx_xfer, .functionality = i2c_imx_func, }; static int __init i2c_imx_probe(struct platform_device *pdev) { struct imx_i2c_struct *i2c_imx; struct resource *res; struct imxi2c_platform_data *pdata; void __iomem *base; resource_size_t res_size; int irq; int ret; dev_dbg(&pdev->dev, "<%s>\n", __func__); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "can't get device resources\n"); return -ENOENT; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "can't get irq number\n"); return -ENOENT; } pdata = pdev->dev.platform_data; if (pdata && pdata->init) { ret = pdata->init(&pdev->dev); if (ret) return ret; } res_size = resource_size(res); base = ioremap(res->start, res_size); if (!base) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -EIO; goto fail0; } i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL); if (!i2c_imx) { dev_err(&pdev->dev, "can't allocate interface\n"); ret = -ENOMEM; goto fail1; } if (!request_mem_region(res->start, res_size, DRIVER_NAME)) { ret = -EBUSY; goto fail2; } /* Setup i2c_imx driver structure */ strcpy(i2c_imx->adapter.name, pdev->name); i2c_imx->adapter.owner = THIS_MODULE; i2c_imx->adapter.algo = &i2c_imx_algo; i2c_imx->adapter.dev.parent = &pdev->dev; i2c_imx->adapter.nr = pdev->id; i2c_imx->irq = irq; i2c_imx->base = base; i2c_imx->res = res; /* Get I2C clock */ i2c_imx->clk = clk_get(&pdev->dev, "i2c_clk"); if (IS_ERR(i2c_imx->clk)) { ret = PTR_ERR(i2c_imx->clk); dev_err(&pdev->dev, "can't get I2C clock\n"); goto fail3; } /* Request IRQ */ ret = request_irq(i2c_imx->irq, i2c_imx_isr, 0, pdev->name, i2c_imx); if (ret) { dev_err(&pdev->dev, "can't claim irq %d\n", i2c_imx->irq); goto fail4; } /* Init queue */ init_waitqueue_head(&i2c_imx->queue); /* Set up adapter data */ 
i2c_set_adapdata(&i2c_imx->adapter, i2c_imx); /* Set up clock divider */ if (pdata && pdata->bitrate) i2c_imx_set_clk(i2c_imx, pdata->bitrate); else i2c_imx_set_clk(i2c_imx, IMX_I2C_BIT_RATE); /* Set up chip registers to defaults */ writeb(0, i2c_imx->base + IMX_I2C_I2CR); writeb(0, i2c_imx->base + IMX_I2C_I2SR); /* Add I2C adapter */ ret = i2c_add_numbered_adapter(&i2c_imx->adapter); if (ret < 0) { dev_err(&pdev->dev, "registration failed\n"); goto fail5; } /* Set up platform driver data */ platform_set_drvdata(pdev, i2c_imx); dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", i2c_imx->irq); dev_dbg(&i2c_imx->adapter.dev, "device resources from 0x%x to 0x%x\n", i2c_imx->res->start, i2c_imx->res->end); dev_dbg(&i2c_imx->adapter.dev, "allocated %d bytes at 0x%x \n", res_size, i2c_imx->res->start); dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", i2c_imx->adapter.name); dev_dbg(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); return 0; /* Return OK */ fail5: free_irq(i2c_imx->irq, i2c_imx); fail4: clk_put(i2c_imx->clk); fail3: release_mem_region(i2c_imx->res->start, resource_size(res)); fail2: kfree(i2c_imx); fail1: iounmap(base); fail0: if (pdata && pdata->exit) pdata->exit(&pdev->dev); return ret; /* Return error number */ } static int __exit i2c_imx_remove(struct platform_device *pdev) { struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev); struct imxi2c_platform_data *pdata = pdev->dev.platform_data; /* remove adapter */ dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); i2c_del_adapter(&i2c_imx->adapter); platform_set_drvdata(pdev, NULL); /* free interrupt */ free_irq(i2c_imx->irq, i2c_imx); /* setup chip registers to defaults */ writeb(0, i2c_imx->base + IMX_I2C_IADR); writeb(0, i2c_imx->base + IMX_I2C_IFDR); writeb(0, i2c_imx->base + IMX_I2C_I2CR); writeb(0, i2c_imx->base + IMX_I2C_I2SR); /* Shut down hardware */ if (pdata && pdata->exit) pdata->exit(&pdev->dev); clk_put(i2c_imx->clk); release_mem_region(i2c_imx->res->start, 
resource_size(i2c_imx->res)); iounmap(i2c_imx->base); kfree(i2c_imx); return 0; } static struct platform_driver i2c_imx_driver = { .probe = i2c_imx_probe, .remove = __exit_p(i2c_imx_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, } }; static int __init i2c_adap_imx_init(void) { return platform_driver_probe(&i2c_imx_driver, i2c_imx_probe); } subsys_initcall(i2c_adap_imx_init); static void __exit i2c_adap_imx_exit(void) { platform_driver_unregister(&i2c_imx_driver); } module_exit(i2c_adap_imx_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Darius Augulis"); MODULE_DESCRIPTION("I2C adapter driver for IMX I2C bus"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
Stane1983/amlogic-m6_m8
drivers/hv/connection.c
519
10882
/* * * Copyright (c) 2009, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/hyperv.h> #include <linux/export.h> #include <asm/hyperv.h> #include "hyperv_vmbus.h" struct vmbus_connection vmbus_connection = { .conn_state = DISCONNECTED, .next_gpadl_handle = ATOMIC_INIT(0xE1E10), }; /* * Negotiated protocol version with the host. 
*/ __u32 vmbus_proto_version; EXPORT_SYMBOL_GPL(vmbus_proto_version); static __u32 vmbus_get_next_version(__u32 current_version) { switch (current_version) { case (VERSION_WIN7): return VERSION_WS2008; case (VERSION_WIN8): return VERSION_WIN7; case (VERSION_WIN8_1): return VERSION_WIN8; case (VERSION_WS2008): default: return VERSION_INVAL; } } static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, __u32 version) { int ret = 0; struct vmbus_channel_initiate_contact *msg; unsigned long flags; init_completion(&msginfo->waitevent); msg = (struct vmbus_channel_initiate_contact *)msginfo->msg; msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT; msg->vmbus_version_requested = version; msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages); msg->monitor_page2 = virt_to_phys( (void *)((unsigned long)vmbus_connection.monitor_pages + PAGE_SIZE)); if (version == VERSION_WIN8_1) msg->target_vcpu = hv_context.vp_index[smp_processor_id()]; /* * Add to list before we send the request since we may * receive the response before returning from this routine */ spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_add_tail(&msginfo->msglistentry, &vmbus_connection.chn_msg_list); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_initiate_contact)); if (ret != 0) { spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); return ret; } /* Wait for the connection response */ wait_for_completion(&msginfo->waitevent); spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); list_del(&msginfo->msglistentry); spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); /* Check if successful */ if (msginfo->response.version_response.version_supported) { vmbus_connection.conn_state = CONNECTED; } else { return 
-ECONNREFUSED; } return ret; } /* * vmbus_connect - Sends a connect request on the partition service connection */ int vmbus_connect(void) { int ret = 0; struct vmbus_channel_msginfo *msginfo = NULL; __u32 version; /* Initialize the vmbus connection */ vmbus_connection.conn_state = CONNECTING; vmbus_connection.work_queue = create_workqueue("hv_vmbus_con"); if (!vmbus_connection.work_queue) { ret = -ENOMEM; goto cleanup; } INIT_LIST_HEAD(&vmbus_connection.chn_msg_list); spin_lock_init(&vmbus_connection.channelmsg_lock); INIT_LIST_HEAD(&vmbus_connection.chn_list); spin_lock_init(&vmbus_connection.channel_lock); /* * Setup the vmbus event connection for channel interrupt * abstraction stuff */ vmbus_connection.int_page = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0); if (vmbus_connection.int_page == NULL) { ret = -ENOMEM; goto cleanup; } vmbus_connection.recv_int_page = vmbus_connection.int_page; vmbus_connection.send_int_page = (void *)((unsigned long)vmbus_connection.int_page + (PAGE_SIZE >> 1)); /* * Setup the monitor notification facility. The 1st page for * parent->child and the 2nd page for child->parent */ vmbus_connection.monitor_pages = (void *)__get_free_pages((GFP_KERNEL|__GFP_ZERO), 1); if (vmbus_connection.monitor_pages == NULL) { ret = -ENOMEM; goto cleanup; } msginfo = kzalloc(sizeof(*msginfo) + sizeof(struct vmbus_channel_initiate_contact), GFP_KERNEL); if (msginfo == NULL) { ret = -ENOMEM; goto cleanup; } /* * Negotiate a compatible VMBUS version number with the * host. We start with the highest number we can support * and work our way down until we negotiate a compatible * version. 
*/ version = VERSION_CURRENT; do { ret = vmbus_negotiate_version(msginfo, version); if (ret == 0) break; version = vmbus_get_next_version(version); } while (version != VERSION_INVAL); if (version == VERSION_INVAL) goto cleanup; vmbus_proto_version = version; pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n", host_info_eax, host_info_ebx >> 16, host_info_ebx & 0xFFFF, host_info_ecx, host_info_edx >> 24, host_info_edx & 0xFFFFFF, version >> 16, version & 0xFFFF); kfree(msginfo); return 0; cleanup: pr_err("Unable to connect to host\n"); vmbus_connection.conn_state = DISCONNECTED; if (vmbus_connection.work_queue) destroy_workqueue(vmbus_connection.work_queue); if (vmbus_connection.int_page) { free_pages((unsigned long)vmbus_connection.int_page, 0); vmbus_connection.int_page = NULL; } if (vmbus_connection.monitor_pages) { free_pages((unsigned long)vmbus_connection.monitor_pages, 1); vmbus_connection.monitor_pages = NULL; } kfree(msginfo); return ret; } /* * relid2channel - Get the channel object given its * child relative id (ie channel id) */ struct vmbus_channel *relid2channel(u32 relid) { struct vmbus_channel *channel; struct vmbus_channel *found_channel = NULL; unsigned long flags; spin_lock_irqsave(&vmbus_connection.channel_lock, flags); list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { if (channel->offermsg.child_relid == relid) { found_channel = channel; break; } } spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); return found_channel; } /* * process_chn_event - Process a channel event notification */ static void process_chn_event(u32 relid) { struct vmbus_channel *channel; unsigned long flags; void *arg; bool read_state; u32 bytes_to_read; /* * Find the channel based on this relid and invokes the * channel callback to process the event */ channel = relid2channel(relid); if (!channel) { pr_err("channel not found for relid - %u\n", relid); return; } /* * A channel once created is persistent even when there * is 
no driver handling the device. An unloading driver * sets the onchannel_callback to NULL under the * protection of the channel inbound_lock. Thus, checking * and invoking the driver specific callback takes care of * orderly unloading of the driver. */ spin_lock_irqsave(&channel->inbound_lock, flags); if (channel->onchannel_callback != NULL) { arg = channel->channel_callback_context; read_state = channel->batched_reading; /* * This callback reads the messages sent by the host. * We can optimize host to guest signaling by ensuring: * 1. While reading the channel, we disable interrupts from * host. * 2. Ensure that we process all posted messages from the host * before returning from this callback. * 3. Once we return, enable signaling from the host. Once this * state is set we check to see if additional packets are * available to read. In this case we repeat the process. */ do { if (read_state) hv_begin_read(&channel->inbound); channel->onchannel_callback(arg); if (read_state) bytes_to_read = hv_end_read(&channel->inbound); else bytes_to_read = 0; } while (read_state && (bytes_to_read != 0)); } else { pr_err("no channel callback for relid - %u\n", relid); } spin_unlock_irqrestore(&channel->inbound_lock, flags); } /* * vmbus_on_event - Handler for events */ void vmbus_on_event(unsigned long data) { u32 dword; u32 maxdword; int bit; u32 relid; u32 *recv_int_page = NULL; void *page_addr; int cpu = smp_processor_id(); union hv_synic_event_flags *event; if ((vmbus_proto_version == VERSION_WS2008) || (vmbus_proto_version == VERSION_WIN7)) { maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5; recv_int_page = vmbus_connection.recv_int_page; } else { /* * When the host is win8 and beyond, the event page * can be directly checked to get the id of the channel * that has the interrupt pending. 
*/ maxdword = HV_EVENT_FLAGS_DWORD_COUNT; page_addr = hv_context.synic_event_page[cpu]; event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; recv_int_page = event->flags32; } /* Check events */ if (!recv_int_page) return; for (dword = 0; dword < maxdword; dword++) { if (!recv_int_page[dword]) continue; for (bit = 0; bit < 32; bit++) { if (sync_test_and_clear_bit(bit, (unsigned long *)&recv_int_page[dword])) { relid = (dword << 5) + bit; if (relid == 0) /* * Special case - vmbus * channel protocol msg */ continue; process_chn_event(relid); } } } } /* * vmbus_post_msg - Send a msg on the vmbus's message connection */ int vmbus_post_msg(void *buffer, size_t buflen) { union hv_connection_id conn_id; int ret = 0; int retries = 0; conn_id.asu32 = 0; conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID; /* * hv_post_message() can have transient failures because of * insufficient resources. Retry the operation a couple of * times before giving up. */ while (retries < 3) { ret = hv_post_message(conn_id, 1, buffer, buflen); if (ret != HV_STATUS_INSUFFICIENT_BUFFERS) return ret; retries++; msleep(100); } return ret; } /* * vmbus_set_event - Send an event notification to the parent */ int vmbus_set_event(struct vmbus_channel *channel) { u32 child_relid = channel->offermsg.child_relid; if (!channel->is_dedicated_interrupt) { /* Each u32 represents 32 channels */ sync_set_bit(child_relid & 31, (unsigned long *)vmbus_connection.send_int_page + (child_relid >> 5)); } return hv_signal_event(channel->sig_event); }
gpl-2.0
gmtorg/stm32_uclinux
net/sunrpc/auth_gss/gss_krb5_mech.c
519
7235
/* * linux/net/sunrpc/gss_krb5_mech.c * * Copyright (c) 2001 The Regents of the University of Michigan. * All rights reserved. * * Andy Adamson <andros@umich.edu> * J. Bruce Fields <bfields@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/gss_krb5.h> #include <linux/sunrpc/xdr.h> #include <linux/crypto.h> #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH #endif static const void * simple_get_bytes(const void *p, const void *end, void *res, int len) { const void *q = (const void *)((const char *)p + len); if (unlikely(q > end || q < p)) return ERR_PTR(-EFAULT); memcpy(res, p, len); return q; } static const void * simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) { const void *q; unsigned int len; p = simple_get_bytes(p, end, &len, sizeof(len)); if (IS_ERR(p)) return p; q = (const void *)((const char *)p + len); if (unlikely(q > end || q < p)) return ERR_PTR(-EFAULT); res->data = kmemdup(p, len, GFP_NOFS); if (unlikely(res->data == NULL)) return ERR_PTR(-ENOMEM); res->len = len; return q; } static inline const void * get_key(const void *p, const void *end, struct crypto_blkcipher **res) { struct xdr_netobj key; int alg; char *alg_name; p = simple_get_bytes(p, end, &alg, sizeof(alg)); if (IS_ERR(p)) goto out_err; p = simple_get_netobj(p, end, &key); if (IS_ERR(p)) goto out_err; switch (alg) { case ENCTYPE_DES_CBC_RAW: alg_name = "cbc(des)"; break; default: printk("gss_kerberos_mech: unsupported algorithm %d\n", alg); goto out_err_free_key; } *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(*res)) { printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name); *res = NULL; goto out_err_free_key; } if (crypto_blkcipher_setkey(*res, key.data, key.len)) { printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name); goto out_err_free_tfm; } kfree(key.data); return p; out_err_free_tfm: crypto_free_blkcipher(*res); out_err_free_key: kfree(key.data); p = ERR_PTR(-EINVAL); out_err: return p; } static int 
gss_import_sec_context_kerberos(const void *p, size_t len, struct gss_ctx *ctx_id) { const void *end = (const void *)((const char *)p + len); struct krb5_ctx *ctx; int tmp; if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) { p = ERR_PTR(-ENOMEM); goto out_err; } p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); if (IS_ERR(p)) goto out_err_free_ctx; /* The downcall format was designed before we completely understood * the uses of the context fields; so it includes some stuff we * just give some minimal sanity-checking, and some we ignore * completely (like the next twenty bytes): */ if (unlikely(p + 20 > end || p + 20 < p)) goto out_err_free_ctx; p += 20; p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); if (IS_ERR(p)) goto out_err_free_ctx; if (tmp != SGN_ALG_DES_MAC_MD5) { p = ERR_PTR(-ENOSYS); goto out_err_free_ctx; } p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); if (IS_ERR(p)) goto out_err_free_ctx; if (tmp != SEAL_ALG_DES) { p = ERR_PTR(-ENOSYS); goto out_err_free_ctx; } p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); if (IS_ERR(p)) goto out_err_free_ctx; p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send)); if (IS_ERR(p)) goto out_err_free_ctx; p = simple_get_netobj(p, end, &ctx->mech_used); if (IS_ERR(p)) goto out_err_free_ctx; p = get_key(p, end, &ctx->enc); if (IS_ERR(p)) goto out_err_free_mech; p = get_key(p, end, &ctx->seq); if (IS_ERR(p)) goto out_err_free_key1; if (p != end) { p = ERR_PTR(-EFAULT); goto out_err_free_key2; } ctx_id->internal_ctx_id = ctx; dprintk("RPC: Successfully imported new context.\n"); return 0; out_err_free_key2: crypto_free_blkcipher(ctx->seq); out_err_free_key1: crypto_free_blkcipher(ctx->enc); out_err_free_mech: kfree(ctx->mech_used.data); out_err_free_ctx: kfree(ctx); out_err: return PTR_ERR(p); } static void gss_delete_sec_context_kerberos(void *internal_ctx) { struct krb5_ctx *kctx = internal_ctx; crypto_free_blkcipher(kctx->seq); crypto_free_blkcipher(kctx->enc); 
kfree(kctx->mech_used.data); kfree(kctx); } static const struct gss_api_ops gss_kerberos_ops = { .gss_import_sec_context = gss_import_sec_context_kerberos, .gss_get_mic = gss_get_mic_kerberos, .gss_verify_mic = gss_verify_mic_kerberos, .gss_wrap = gss_wrap_kerberos, .gss_unwrap = gss_unwrap_kerberos, .gss_delete_sec_context = gss_delete_sec_context_kerberos, }; static struct pf_desc gss_kerberos_pfs[] = { [0] = { .pseudoflavor = RPC_AUTH_GSS_KRB5, .service = RPC_GSS_SVC_NONE, .name = "krb5", }, [1] = { .pseudoflavor = RPC_AUTH_GSS_KRB5I, .service = RPC_GSS_SVC_INTEGRITY, .name = "krb5i", }, [2] = { .pseudoflavor = RPC_AUTH_GSS_KRB5P, .service = RPC_GSS_SVC_PRIVACY, .name = "krb5p", }, }; static struct gss_api_mech gss_kerberos_mech = { .gm_name = "krb5", .gm_owner = THIS_MODULE, .gm_oid = {9, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02"}, .gm_ops = &gss_kerberos_ops, .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), .gm_pfs = gss_kerberos_pfs, }; static int __init init_kerberos_module(void) { int status; status = gss_mech_register(&gss_kerberos_mech); if (status) printk("Failed to register kerberos gss mechanism!\n"); return status; } static void __exit cleanup_kerberos_module(void) { gss_mech_unregister(&gss_kerberos_mech); } MODULE_LICENSE("GPL"); module_init(init_kerberos_module); module_exit(cleanup_kerberos_module);
gpl-2.0
sikarash/linux-pm
fs/hfs/brec.c
1287
13816
/* * linux/fs/hfs/brec.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handle individual btree records */ #include "btree.h" static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd); static int hfs_brec_update_parent(struct hfs_find_data *fd); static int hfs_btree_inc_height(struct hfs_btree *tree); /* Get the length and offset of the given record in the given node */ u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off) { __be16 retval[2]; u16 dataoff; dataoff = node->tree->node_size - (rec + 2) * 2; hfs_bnode_read(node, retval, dataoff, 4); *off = be16_to_cpu(retval[1]); return be16_to_cpu(retval[0]) - *off; } /* Get the length of the key from a keyed record */ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) { u16 retval, recoff; if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF) return 0; if ((node->type == HFS_NODE_INDEX) && !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) { if (node->tree->attributes & HFS_TREE_BIGKEYS) retval = node->tree->max_key_len + 2; else retval = node->tree->max_key_len + 1; } else { recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2); if (!recoff) return 0; if (node->tree->attributes & HFS_TREE_BIGKEYS) { retval = hfs_bnode_read_u16(node, recoff) + 2; if (retval > node->tree->max_key_len + 2) { pr_err("keylen %d too large\n", retval); retval = 0; } } else { retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; if (retval > node->tree->max_key_len + 1) { pr_err("keylen %d too large\n", retval); retval = 0; } } } return retval; } int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node; int size, key_len, rec; int data_off, end_off; int idx_rec_off, data_rec_off, end_rec_off; __be32 cnid; tree = fd->tree; if (!fd->bnode) { if (!tree->root) hfs_btree_inc_height(tree); fd->bnode = hfs_bnode_find(tree, tree->leaf_head); if (IS_ERR(fd->bnode)) 
return PTR_ERR(fd->bnode); fd->record = -1; } new_node = NULL; key_len = (fd->search_key->key_len | 1) + 1; again: /* new record idx and complete record size */ rec = fd->record + 1; size = key_len + entry_len; node = fd->bnode; hfs_bnode_dump(node); /* get last offset */ end_rec_off = tree->node_size - (node->num_recs + 1) * 2; end_off = hfs_bnode_read_u16(node, end_rec_off); end_rec_off -= 2; hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off); if (size > end_rec_off - end_off) { if (new_node) panic("not enough room!\n"); new_node = hfs_bnode_split(fd); if (IS_ERR(new_node)) return PTR_ERR(new_node); goto again; } if (node->type == HFS_NODE_LEAF) { tree->leaf_count++; mark_inode_dirty(tree->inode); } node->num_recs++; /* write new last offset */ hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); hfs_bnode_write_u16(node, end_rec_off, end_off + size); data_off = end_off; data_rec_off = end_rec_off + 2; idx_rec_off = tree->node_size - (rec + 1) * 2; if (idx_rec_off == data_rec_off) goto skip; /* move all following entries */ do { data_off = hfs_bnode_read_u16(node, data_rec_off + 2); hfs_bnode_write_u16(node, data_rec_off, data_off + size); data_rec_off += 2; } while (data_rec_off < idx_rec_off); /* move data away */ hfs_bnode_move(node, data_off + size, data_off, end_off - data_off); skip: hfs_bnode_write(node, fd->search_key, data_off, key_len); hfs_bnode_write(node, entry, data_off + key_len, entry_len); hfs_bnode_dump(node); /* * update parent key if we inserted a key * at the start of the node and it is not the new node */ if (!rec && new_node != node) { hfs_bnode_read_key(node, fd->search_key, data_off + size); hfs_brec_update_parent(fd); } if (new_node) { hfs_bnode_put(fd->bnode); if (!new_node->parent) { hfs_btree_inc_height(tree); new_node->parent = tree->root; } fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index data entry */ cnid = cpu_to_be32(new_node->this); entry = 
&cnid; entry_len = sizeof(cnid); /* get index key */ hfs_bnode_read_key(new_node, fd->search_key, 14); __hfs_brec_find(fd->bnode, fd); hfs_bnode_put(new_node); new_node = NULL; if (tree->attributes & HFS_TREE_VARIDXKEYS) key_len = fd->search_key->key_len + 1; else { fd->search_key->key_len = tree->max_key_len; key_len = tree->max_key_len + 1; } goto again; } return 0; } int hfs_brec_remove(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *parent; int end_off, rec_off, data_off, size; tree = fd->tree; node = fd->bnode; again: rec_off = tree->node_size - (fd->record + 2) * 2; end_off = tree->node_size - (node->num_recs + 1) * 2; if (node->type == HFS_NODE_LEAF) { tree->leaf_count--; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength); if (!--node->num_recs) { hfs_bnode_unlink(node); if (!node->parent) return 0; parent = hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); hfs_bnode_put(node); node = fd->bnode = parent; __hfs_brec_find(node, fd); goto again; } hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); if (rec_off == end_off) goto skip; size = fd->keylength + fd->entrylength; do { data_off = hfs_bnode_read_u16(node, rec_off); hfs_bnode_write_u16(node, rec_off + 2, data_off - size); rec_off -= 2; } while (rec_off >= end_off); /* fill hole */ hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size, data_off - fd->keyoffset - size); skip: hfs_bnode_dump(node); if (!fd->record) hfs_brec_update_parent(fd); return 0; } static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *next_node; struct hfs_bnode_desc node_desc; int num_recs, new_rec_off, new_off, old_rec_off; int data_start, data_end, size; tree = fd->tree; node = fd->bnode; new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) return new_node; 
hfs_bnode_get(node); hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n", node->this, new_node->this, node->next); new_node->next = node->next; new_node->prev = node->this; new_node->parent = node->parent; new_node->type = node->type; new_node->height = node->height; if (node->next) next_node = hfs_bnode_find(tree, node->next); else next_node = NULL; if (IS_ERR(next_node)) { hfs_bnode_put(node); hfs_bnode_put(new_node); return next_node; } size = tree->node_size / 2 - node->num_recs * 2 - 14; old_rec_off = tree->node_size - 4; num_recs = 1; for (;;) { data_start = hfs_bnode_read_u16(node, old_rec_off); if (data_start > size) break; old_rec_off -= 2; if (++num_recs < node->num_recs) continue; /* panic? */ hfs_bnode_put(node); hfs_bnode_put(new_node); if (next_node) hfs_bnode_put(next_node); return ERR_PTR(-ENOSPC); } if (fd->record + 1 < num_recs) { /* new record is in the lower half, * so leave some more space there */ old_rec_off += 2; num_recs--; data_start = hfs_bnode_read_u16(node, old_rec_off); } else { hfs_bnode_put(node); hfs_bnode_get(new_node); fd->bnode = new_node; fd->record -= num_recs; fd->keyoffset -= data_start - 14; fd->entryoffset -= data_start - 14; } new_node->num_recs = node->num_recs - num_recs; node->num_recs = num_recs; new_rec_off = tree->node_size - 2; new_off = 14; size = data_start - new_off; num_recs = new_node->num_recs; data_end = data_start; while (num_recs) { hfs_bnode_write_u16(new_node, new_rec_off, new_off); old_rec_off -= 2; new_rec_off -= 2; data_end = hfs_bnode_read_u16(node, old_rec_off); new_off = data_end - size; num_recs--; } hfs_bnode_write_u16(new_node, new_rec_off, new_off); hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start); /* update new bnode header */ node_desc.next = cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; 
hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc)); /* update previous bnode header */ node->next = new_node->this; hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc)); node_desc.next = cpu_to_be32(node->next); node_desc.num_recs = cpu_to_be16(node->num_recs); hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc)); /* update next bnode header */ if (next_node) { next_node->prev = new_node->this; hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc)); node_desc.prev = cpu_to_be32(next_node->prev); hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc)); hfs_bnode_put(next_node); } else if (node->this == tree->leaf_tail) { /* if there is no next node, this might be the new tail */ tree->leaf_tail = new_node->this; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); hfs_bnode_dump(new_node); hfs_bnode_put(node); return new_node; } static int hfs_brec_update_parent(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *parent; int newkeylen, diff; int rec, rec_off, end_rec_off; int start_off, end_off; tree = fd->tree; node = fd->bnode; new_node = NULL; if (!node->parent) return 0; again: parent = hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); __hfs_brec_find(parent, fd); if (fd->record < 0) return -ENOENT; hfs_bnode_dump(parent); rec = fd->record; /* size difference between old and new key */ if (tree->attributes & HFS_TREE_VARIDXKEYS) newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1; else fd->keylength = newkeylen = tree->max_key_len + 1; hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen); rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; diff = newkeylen - fd->keylength; if (!diff) goto skip; if (diff > 0) { end_off = hfs_bnode_read_u16(parent, end_rec_off); if (end_rec_off - end_off < diff) { printk(KERN_DEBUG "splitting index node...\n"); fd->bnode = parent; new_node = hfs_bnode_split(fd); if 
(IS_ERR(new_node)) return PTR_ERR(new_node); parent = fd->bnode; rec = fd->record; rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; } } end_off = start_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, rec_off, start_off + diff); start_off -= 4; /* move previous cnid too */ while (rec_off > end_rec_off) { rec_off -= 2; end_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, rec_off, end_off + diff); } hfs_bnode_move(parent, start_off + diff, start_off, end_off - start_off); skip: hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen); if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) hfs_bnode_write_u8(parent, fd->keyoffset, newkeylen - 1); hfs_bnode_dump(parent); hfs_bnode_put(node); node = parent; if (new_node) { __be32 cnid; fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index key and entry */ hfs_bnode_read_key(new_node, fd->search_key, 14); cnid = cpu_to_be32(new_node->this); __hfs_brec_find(fd->bnode, fd); hfs_brec_insert(fd, &cnid, sizeof(cnid)); hfs_bnode_put(fd->bnode); hfs_bnode_put(new_node); if (!rec) { if (new_node == node) goto out; /* restore search_key */ hfs_bnode_read_key(node, fd->search_key, 14); } } if (!rec && node->parent) goto again; out: fd->bnode = node; return 0; } static int hfs_btree_inc_height(struct hfs_btree *tree) { struct hfs_bnode *node, *new_node; struct hfs_bnode_desc node_desc; int key_size, rec; __be32 cnid; node = NULL; if (tree->root) { node = hfs_bnode_find(tree, tree->root); if (IS_ERR(node)) return PTR_ERR(node); } new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) { hfs_bnode_put(node); return PTR_ERR(new_node); } tree->root = new_node->this; if (!tree->depth) { tree->leaf_head = tree->leaf_tail = new_node->this; new_node->type = HFS_NODE_LEAF; new_node->num_recs = 0; } else { new_node->type = HFS_NODE_INDEX; new_node->num_recs = 1; } new_node->parent = 0; new_node->next = 0; new_node->prev = 0; 
new_node->height = ++tree->depth; node_desc.next = cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc)); rec = tree->node_size - 2; hfs_bnode_write_u16(new_node, rec, 14); if (node) { /* insert old root idx into new root */ node->parent = tree->root; if (node->type == HFS_NODE_LEAF || tree->attributes & HFS_TREE_VARIDXKEYS) key_size = hfs_bnode_read_u8(node, 14) + 1; else key_size = tree->max_key_len + 1; hfs_bnode_copy(new_node, 14, node, 14, key_size); if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) { key_size = tree->max_key_len + 1; hfs_bnode_write_u8(new_node, 14, tree->max_key_len); } key_size = (key_size + 1) & -2; cnid = cpu_to_be32(node->this); hfs_bnode_write(new_node, &cnid, 14 + key_size, 4); rec -= 2; hfs_bnode_write_u16(new_node, rec, 14 + key_size + 4); hfs_bnode_put(node); } hfs_bnode_put(new_node); mark_inode_dirty(tree->inode); return 0; }
gpl-2.0
airk000/kernel_htc_7x30
kernel/audit_tree.c
1543
22379
#include "audit.h" #include <linux/fsnotify_backend.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/kthread.h> #include <linux/slab.h> struct audit_tree; struct audit_chunk; struct audit_tree { atomic_t count; int goner; struct audit_chunk *root; struct list_head chunks; struct list_head rules; struct list_head list; struct list_head same_root; struct rcu_head head; char pathname[]; }; struct audit_chunk { struct list_head hash; struct fsnotify_mark mark; struct list_head trees; /* with root here */ int dead; int count; atomic_long_t refs; struct rcu_head head; struct node { struct list_head list; struct audit_tree *owner; unsigned index; /* index; upper bit indicates 'will prune' */ } owners[]; }; static LIST_HEAD(tree_list); static LIST_HEAD(prune_list); /* * One struct chunk is attached to each inode of interest. * We replace struct chunk on tagging/untagging. * Rules have pointer to struct audit_tree. * Rules have struct list_head rlist forming a list of rules over * the same tree. * References to struct chunk are collected at audit_inode{,_child}() * time and used in AUDIT_TREE rule matching. * These references are dropped at the same time we are calling * audit_free_names(), etc. * * Cyclic lists galore: * tree.chunks anchors chunk.owners[].list hash_lock * tree.rules anchors rule.rlist audit_filter_mutex * chunk.trees anchors tree.same_root hash_lock * chunk.hash is a hash with middle bits of watch.inode as * a hash function. RCU, hash_lock * * tree is refcounted; one reference for "some rules on rules_list refer to * it", one for each chunk with pointer to it. * * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount * of watch contributes 1 to .refs). * * node.index allows to get from node.list to containing chunk. * MSB of that sucker is stolen to mark taggings that we might have to * revert - several operations have very unpleasant cleanup logics and * that makes a difference. Some. 
*/ static struct fsnotify_group *audit_tree_group; static struct audit_tree *alloc_tree(const char *s) { struct audit_tree *tree; tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL); if (tree) { atomic_set(&tree->count, 1); tree->goner = 0; INIT_LIST_HEAD(&tree->chunks); INIT_LIST_HEAD(&tree->rules); INIT_LIST_HEAD(&tree->list); INIT_LIST_HEAD(&tree->same_root); tree->root = NULL; strcpy(tree->pathname, s); } return tree; } static inline void get_tree(struct audit_tree *tree) { atomic_inc(&tree->count); } static void __put_tree(struct rcu_head *rcu) { struct audit_tree *tree = container_of(rcu, struct audit_tree, head); kfree(tree); } static inline void put_tree(struct audit_tree *tree) { if (atomic_dec_and_test(&tree->count)) call_rcu(&tree->head, __put_tree); } /* to avoid bringing the entire thing in audit.h */ const char *audit_tree_path(struct audit_tree *tree) { return tree->pathname; } static void free_chunk(struct audit_chunk *chunk) { int i; for (i = 0; i < chunk->count; i++) { if (chunk->owners[i].owner) put_tree(chunk->owners[i].owner); } kfree(chunk); } void audit_put_chunk(struct audit_chunk *chunk) { if (atomic_long_dec_and_test(&chunk->refs)) free_chunk(chunk); } static void __put_chunk(struct rcu_head *rcu) { struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head); audit_put_chunk(chunk); } static void audit_tree_destroy_watch(struct fsnotify_mark *entry) { struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); call_rcu(&chunk->head, __put_chunk); } static struct audit_chunk *alloc_chunk(int count) { struct audit_chunk *chunk; size_t size; int i; size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node); chunk = kzalloc(size, GFP_KERNEL); if (!chunk) return NULL; INIT_LIST_HEAD(&chunk->hash); INIT_LIST_HEAD(&chunk->trees); chunk->count = count; atomic_long_set(&chunk->refs, 1); for (i = 0; i < count; i++) { INIT_LIST_HEAD(&chunk->owners[i].list); chunk->owners[i].index = i; } 
fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch); return chunk; } enum {HASH_SIZE = 128}; static struct list_head chunk_hash_heads[HASH_SIZE]; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock); static inline struct list_head *chunk_hash(const struct inode *inode) { unsigned long n = (unsigned long)inode / L1_CACHE_BYTES; return chunk_hash_heads + n % HASH_SIZE; } /* hash_lock & entry->lock is held by caller */ static void insert_hash(struct audit_chunk *chunk) { struct fsnotify_mark *entry = &chunk->mark; struct list_head *list; if (!entry->i.inode) return; list = chunk_hash(entry->i.inode); list_add_rcu(&chunk->hash, list); } /* called under rcu_read_lock */ struct audit_chunk *audit_tree_lookup(const struct inode *inode) { struct list_head *list = chunk_hash(inode); struct audit_chunk *p; list_for_each_entry_rcu(p, list, hash) { /* mark.inode may have gone NULL, but who cares? */ if (p->mark.i.inode == inode) { atomic_long_inc(&p->refs); return p; } } return NULL; } int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree) { int n; for (n = 0; n < chunk->count; n++) if (chunk->owners[n].owner == tree) return 1; return 0; } /* tagging and untagging inodes with trees */ static struct audit_chunk *find_chunk(struct node *p) { int index = p->index & ~(1U<<31); p -= index; return container_of(p, struct audit_chunk, owners[0]); } static void untag_chunk(struct node *p) { struct audit_chunk *chunk = find_chunk(p); struct fsnotify_mark *entry = &chunk->mark; struct audit_chunk *new = NULL; struct audit_tree *owner; int size = chunk->count - 1; int i, j; fsnotify_get_mark(entry); spin_unlock(&hash_lock); if (size) new = alloc_chunk(size); spin_lock(&entry->lock); if (chunk->dead || !entry->i.inode) { spin_unlock(&entry->lock); if (new) free_chunk(new); goto out; } owner = p->owner; if (!size) { chunk->dead = 1; spin_lock(&hash_lock); list_del_init(&chunk->trees); if (owner->root == chunk) owner->root = NULL; list_del_init(&p->list); 
list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); fsnotify_put_mark(entry); goto out; } if (!new) goto Fallback; fsnotify_duplicate_mark(&new->mark, entry); if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) { free_chunk(new); goto Fallback; } chunk->dead = 1; spin_lock(&hash_lock); list_replace_init(&chunk->trees, &new->trees); if (owner->root == chunk) { list_del_init(&owner->same_root); owner->root = NULL; } for (i = j = 0; j <= size; i++, j++) { struct audit_tree *s; if (&chunk->owners[j] == p) { list_del_init(&p->list); i--; continue; } s = chunk->owners[j].owner; new->owners[i].owner = s; new->owners[i].index = chunk->owners[j].index - j + i; if (!s) /* result of earlier fallback */ continue; get_tree(s); list_replace_init(&chunk->owners[j].list, &new->owners[i].list); } list_replace_rcu(&chunk->hash, &new->hash); list_for_each_entry(owner, &new->trees, same_root) owner->root = new; spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); fsnotify_put_mark(entry); goto out; Fallback: // do the best we can spin_lock(&hash_lock); if (owner->root == chunk) { list_del_init(&owner->same_root); owner->root = NULL; } list_del_init(&p->list); p->owner = NULL; put_tree(owner); spin_unlock(&hash_lock); spin_unlock(&entry->lock); out: fsnotify_put_mark(entry); spin_lock(&hash_lock); } static int create_chunk(struct inode *inode, struct audit_tree *tree) { struct fsnotify_mark *entry; struct audit_chunk *chunk = alloc_chunk(1); if (!chunk) return -ENOMEM; entry = &chunk->mark; if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) { free_chunk(chunk); return -ENOSPC; } spin_lock(&entry->lock); spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); fsnotify_put_mark(entry); return 0; } chunk->owners[0].index = (1U << 31); chunk->owners[0].owner = tree; 
get_tree(tree); list_add(&chunk->owners[0].list, &tree->chunks); if (!tree->root) { tree->root = chunk; list_add(&tree->same_root, &chunk->trees); } insert_hash(chunk); spin_unlock(&hash_lock); spin_unlock(&entry->lock); return 0; } /* the first tagged inode becomes root of tree */ static int tag_chunk(struct inode *inode, struct audit_tree *tree) { struct fsnotify_mark *old_entry, *chunk_entry; struct audit_tree *owner; struct audit_chunk *chunk, *old; struct node *p; int n; old_entry = fsnotify_find_inode_mark(audit_tree_group, inode); if (!old_entry) return create_chunk(inode, tree); old = container_of(old_entry, struct audit_chunk, mark); /* are we already there? */ spin_lock(&hash_lock); for (n = 0; n < old->count; n++) { if (old->owners[n].owner == tree) { spin_unlock(&hash_lock); fsnotify_put_mark(old_entry); return 0; } } spin_unlock(&hash_lock); chunk = alloc_chunk(old->count + 1); if (!chunk) { fsnotify_put_mark(old_entry); return -ENOMEM; } chunk_entry = &chunk->mark; spin_lock(&old_entry->lock); if (!old_entry->i.inode) { /* old_entry is being shot, lets just lie */ spin_unlock(&old_entry->lock); fsnotify_put_mark(old_entry); free_chunk(chunk); return -ENOENT; } fsnotify_duplicate_mark(chunk_entry, old_entry); if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) { spin_unlock(&old_entry->lock); free_chunk(chunk); fsnotify_put_mark(old_entry); return -ENOSPC; } /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */ spin_lock(&chunk_entry->lock); spin_lock(&hash_lock); /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */ if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); fsnotify_destroy_mark(chunk_entry); fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return 0; } list_replace_init(&old->trees, &chunk->trees); for (n = 0, p = chunk->owners; n < old->count; 
n++, p++) { struct audit_tree *s = old->owners[n].owner; p->owner = s; p->index = old->owners[n].index; if (!s) /* result of fallback in untag */ continue; get_tree(s); list_replace_init(&old->owners[n].list, &p->list); } p->index = (chunk->count - 1) | (1U<<31); p->owner = tree; get_tree(tree); list_add(&p->list, &tree->chunks); list_replace_rcu(&old->hash, &chunk->hash); list_for_each_entry(owner, &chunk->trees, same_root) owner->root = chunk; old->dead = 1; if (!tree->root) { tree->root = chunk; list_add(&tree->same_root, &chunk->trees); } spin_unlock(&hash_lock); spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); fsnotify_destroy_mark(old_entry); fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ fsnotify_put_mark(old_entry); /* and kill it */ return 0; } static void kill_rules(struct audit_tree *tree) { struct audit_krule *rule, *next; struct audit_entry *entry; struct audit_buffer *ab; list_for_each_entry_safe(rule, next, &tree->rules, rlist) { entry = container_of(rule, struct audit_entry, rule); list_del_init(&rule->rlist); if (rule->tree) { /* not a half-baked one */ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); audit_log_format(ab, "op="); audit_log_string(ab, "remove rule"); audit_log_format(ab, " dir="); audit_log_untrustedstring(ab, rule->tree->pathname); audit_log_key(ab, rule->filterkey); audit_log_format(ab, " list=%d res=1", rule->listnr); audit_log_end(ab); rule->tree = NULL; list_del_rcu(&entry->list); list_del(&entry->rule.list); call_rcu(&entry->rcu, audit_free_rule_rcu); } } } /* * finish killing struct audit_tree */ static void prune_one(struct audit_tree *victim) { spin_lock(&hash_lock); while (!list_empty(&victim->chunks)) { struct node *p; p = list_entry(victim->chunks.next, struct node, list); untag_chunk(p); } spin_unlock(&hash_lock); put_tree(victim); } /* trim the uncommitted chunks from tree */ static void trim_marked(struct audit_tree *tree) { struct list_head *p, *q; 
spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); return; } /* reorder */ for (p = tree->chunks.next; p != &tree->chunks; p = q) { struct node *node = list_entry(p, struct node, list); q = p->next; if (node->index & (1U<<31)) { list_del_init(p); list_add(p, &tree->chunks); } } while (!list_empty(&tree->chunks)) { struct node *node; node = list_entry(tree->chunks.next, struct node, list); /* have we run out of marked? */ if (!(node->index & (1U<<31))) break; untag_chunk(node); } if (!tree->root && !tree->goner) { tree->goner = 1; spin_unlock(&hash_lock); mutex_lock(&audit_filter_mutex); kill_rules(tree); list_del_init(&tree->list); mutex_unlock(&audit_filter_mutex); prune_one(tree); } else { spin_unlock(&hash_lock); } } static void audit_schedule_prune(void); /* called with audit_filter_mutex */ int audit_remove_tree_rule(struct audit_krule *rule) { struct audit_tree *tree; tree = rule->tree; if (tree) { spin_lock(&hash_lock); list_del_init(&rule->rlist); if (list_empty(&tree->rules) && !tree->goner) { tree->root = NULL; list_del_init(&tree->same_root); tree->goner = 1; list_move(&tree->list, &prune_list); rule->tree = NULL; spin_unlock(&hash_lock); audit_schedule_prune(); return 1; } rule->tree = NULL; spin_unlock(&hash_lock); return 1; } return 0; } static int compare_root(struct vfsmount *mnt, void *arg) { return mnt->mnt_root->d_inode == arg; } void audit_trim_trees(void) { struct list_head cursor; mutex_lock(&audit_filter_mutex); list_add(&cursor, &tree_list); while (cursor.next != &tree_list) { struct audit_tree *tree; struct path path; struct vfsmount *root_mnt; struct node *node; int err; tree = container_of(cursor.next, struct audit_tree, list); get_tree(tree); list_del(&cursor); list_add(&cursor, &tree->list); mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path); if (err) goto skip_it; root_mnt = collect_mounts(&path); path_put(&path); if (!root_mnt) goto skip_it; spin_lock(&hash_lock); list_for_each_entry(node, 
&tree->chunks, list) { struct audit_chunk *chunk = find_chunk(node); /* this could be NULL if the watch is dying else where... */ struct inode *inode = chunk->mark.i.inode; node->index |= 1U<<31; if (iterate_mounts(compare_root, inode, root_mnt)) node->index &= ~(1U<<31); } spin_unlock(&hash_lock); trim_marked(tree); put_tree(tree); drop_collected_mounts(root_mnt); skip_it: mutex_lock(&audit_filter_mutex); } list_del(&cursor); mutex_unlock(&audit_filter_mutex); } int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op) { if (pathname[0] != '/' || rule->listnr != AUDIT_FILTER_EXIT || op != Audit_equal || rule->inode_f || rule->watch || rule->tree) return -EINVAL; rule->tree = alloc_tree(pathname); if (!rule->tree) return -ENOMEM; return 0; } void audit_put_tree(struct audit_tree *tree) { put_tree(tree); } static int tag_mount(struct vfsmount *mnt, void *arg) { return tag_chunk(mnt->mnt_root->d_inode, arg); } /* called with audit_filter_mutex */ int audit_add_tree_rule(struct audit_krule *rule) { struct audit_tree *seed = rule->tree, *tree; struct path path; struct vfsmount *mnt; int err; list_for_each_entry(tree, &tree_list, list) { if (!strcmp(seed->pathname, tree->pathname)) { put_tree(seed); rule->tree = tree; list_add(&rule->rlist, &tree->rules); return 0; } } tree = seed; list_add(&tree->list, &tree_list); list_add(&rule->rlist, &tree->rules); /* do not set rule->tree yet */ mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path); if (err) goto Err; mnt = collect_mounts(&path); path_put(&path); if (!mnt) { err = -ENOMEM; goto Err; } get_tree(tree); err = iterate_mounts(tag_mount, tree, mnt); drop_collected_mounts(mnt); if (!err) { struct node *node; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) node->index &= ~(1U<<31); spin_unlock(&hash_lock); } else { trim_marked(tree); goto Err; } mutex_lock(&audit_filter_mutex); if (list_empty(&rule->rlist)) { put_tree(tree); return -ENOENT; } rule->tree = tree; 
put_tree(tree); return 0; Err: mutex_lock(&audit_filter_mutex); list_del_init(&tree->list); list_del_init(&tree->rules); put_tree(tree); return err; } int audit_tag_tree(char *old, char *new) { struct list_head cursor, barrier; int failed = 0; struct path path1, path2; struct vfsmount *tagged; int err; err = kern_path(new, 0, &path2); if (err) return err; tagged = collect_mounts(&path2); path_put(&path2); if (!tagged) return -ENOMEM; err = kern_path(old, 0, &path1); if (err) { drop_collected_mounts(tagged); return err; } mutex_lock(&audit_filter_mutex); list_add(&barrier, &tree_list); list_add(&cursor, &barrier); while (cursor.next != &tree_list) { struct audit_tree *tree; int good_one = 0; tree = container_of(cursor.next, struct audit_tree, list); get_tree(tree); list_del(&cursor); list_add(&cursor, &tree->list); mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path2); if (!err) { good_one = path_is_under(&path1, &path2); path_put(&path2); } if (!good_one) { put_tree(tree); mutex_lock(&audit_filter_mutex); continue; } failed = iterate_mounts(tag_mount, tree, tagged); if (failed) { put_tree(tree); mutex_lock(&audit_filter_mutex); break; } mutex_lock(&audit_filter_mutex); spin_lock(&hash_lock); if (!tree->goner) { list_del(&tree->list); list_add(&tree->list, &tree_list); } spin_unlock(&hash_lock); put_tree(tree); } while (barrier.prev != &tree_list) { struct audit_tree *tree; tree = container_of(barrier.prev, struct audit_tree, list); get_tree(tree); list_del(&tree->list); list_add(&tree->list, &barrier); mutex_unlock(&audit_filter_mutex); if (!failed) { struct node *node; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) node->index &= ~(1U<<31); spin_unlock(&hash_lock); } else { trim_marked(tree); } put_tree(tree); mutex_lock(&audit_filter_mutex); } list_del(&barrier); list_del(&cursor); mutex_unlock(&audit_filter_mutex); path_put(&path1); drop_collected_mounts(tagged); return failed; } /* * That gets run when 
evict_chunk() ends up needing to kill audit_tree. * Runs from a separate thread. */ static int prune_tree_thread(void *unused) { mutex_lock(&audit_cmd_mutex); mutex_lock(&audit_filter_mutex); while (!list_empty(&prune_list)) { struct audit_tree *victim; victim = list_entry(prune_list.next, struct audit_tree, list); list_del_init(&victim->list); mutex_unlock(&audit_filter_mutex); prune_one(victim); mutex_lock(&audit_filter_mutex); } mutex_unlock(&audit_filter_mutex); mutex_unlock(&audit_cmd_mutex); return 0; } static void audit_schedule_prune(void) { kthread_run(prune_tree_thread, NULL, "audit_prune_tree"); } /* * ... and that one is done if evict_chunk() decides to delay until the end * of syscall. Runs synchronously. */ void audit_kill_trees(struct list_head *list) { mutex_lock(&audit_cmd_mutex); mutex_lock(&audit_filter_mutex); while (!list_empty(list)) { struct audit_tree *victim; victim = list_entry(list->next, struct audit_tree, list); kill_rules(victim); list_del_init(&victim->list); mutex_unlock(&audit_filter_mutex); prune_one(victim); mutex_lock(&audit_filter_mutex); } mutex_unlock(&audit_filter_mutex); mutex_unlock(&audit_cmd_mutex); } /* * Here comes the stuff asynchronous to auditctl operations */ static void evict_chunk(struct audit_chunk *chunk) { struct audit_tree *owner; struct list_head *postponed = audit_killed_trees(); int need_prune = 0; int n; if (chunk->dead) return; chunk->dead = 1; mutex_lock(&audit_filter_mutex); spin_lock(&hash_lock); while (!list_empty(&chunk->trees)) { owner = list_entry(chunk->trees.next, struct audit_tree, same_root); owner->goner = 1; owner->root = NULL; list_del_init(&owner->same_root); spin_unlock(&hash_lock); if (!postponed) { kill_rules(owner); list_move(&owner->list, &prune_list); need_prune = 1; } else { list_move(&owner->list, postponed); } spin_lock(&hash_lock); } list_del_rcu(&chunk->hash); for (n = 0; n < chunk->count; n++) list_del_init(&chunk->owners[n].list); spin_unlock(&hash_lock); if (need_prune) 
audit_schedule_prune(); mutex_unlock(&audit_filter_mutex); } static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmonut_mark, struct fsnotify_event *event) { BUG(); return -EOPNOTSUPP; } static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group) { struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); evict_chunk(chunk); fsnotify_put_mark(entry); } static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, struct fsnotify_mark *inode_mark, struct fsnotify_mark *vfsmount_mark, __u32 mask, void *data, int data_type) { return false; } static const struct fsnotify_ops audit_tree_ops = { .handle_event = audit_tree_handle_event, .should_send_event = audit_tree_send_event, .free_group_priv = NULL, .free_event_priv = NULL, .freeing_mark = audit_tree_freeing_mark, }; static int __init audit_tree_init(void) { int i; audit_tree_group = fsnotify_alloc_group(&audit_tree_ops); if (IS_ERR(audit_tree_group)) audit_panic("cannot initialize fsnotify group for rectree watches"); for (i = 0; i < HASH_SIZE; i++) INIT_LIST_HEAD(&chunk_hash_heads[i]); return 0; } __initcall(audit_tree_init);
gpl-2.0
SamYaple/bcache-dev
arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
1799
1106
/* * platform_emc1403.c: emc1403 platform data initilization file * * (C) Copyright 2013 Intel Corporation * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ #include <linux/init.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <asm/intel-mid.h> static void __init *emc1403_platform_data(void *info) { static short intr2nd_pdata; struct i2c_board_info *i2c_info = info; int intr = get_gpio_by_name("thermal_int"); int intr2nd = get_gpio_by_name("thermal_alert"); if (intr < 0) return NULL; if (intr2nd < 0) return NULL; i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET; intr2nd_pdata = intr2nd + INTEL_MID_IRQ_OFFSET; return &intr2nd_pdata; } static const struct devs_id emc1403_dev_id __initconst = { .name = "emc1403", .type = SFI_DEV_TYPE_I2C, .delay = 1, .get_platform_data = &emc1403_platform_data, }; sfi_device(emc1403_dev_id);
gpl-2.0
adiwgno/tes
drivers/usb/host/xhci-mem.c
2567
75375
/* * xHCI host controller driver * * Copyright (C) 2008 Intel Corp. * * Author: Sarah Sharp * Some code borrowed from the Linux EHCI driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/usb.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/dmapool.h> #include "xhci.h" /* * Allocates a generic ring segment from the ring pool, sets the dma address, * initializes the segment to zero, and sets the private next pointer to NULL. 
* * Section 4.11.1.1: * "All components of all Command and Transfer TRBs shall be initialized to '0'" */ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, unsigned int cycle_state, gfp_t flags) { struct xhci_segment *seg; dma_addr_t dma; int i; seg = kzalloc(sizeof *seg, flags); if (!seg) return NULL; seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); if (!seg->trbs) { kfree(seg); return NULL; } memset(seg->trbs, 0, SEGMENT_SIZE); /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ if (cycle_state == 0) { for (i = 0; i < TRBS_PER_SEGMENT; i++) seg->trbs[i].link.control |= TRB_CYCLE; } seg->dma = dma; seg->next = NULL; return seg; } static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) { if (seg->trbs) { dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); seg->trbs = NULL; } kfree(seg); } static void xhci_free_segments_for_ring(struct xhci_hcd *xhci, struct xhci_segment *first) { struct xhci_segment *seg; seg = first->next; while (seg != first) { struct xhci_segment *next = seg->next; xhci_segment_free(xhci, seg); seg = next; } xhci_segment_free(xhci, first); } /* * Make the prev segment point to the next segment. * * Change the last TRB in the prev segment to be a Link TRB which points to the * DMA address of the next segment. The caller needs to set any Link TRB * related flags, such as End TRB, Toggle Cycle, and no snoop. 
*/ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, struct xhci_segment *next, enum xhci_ring_type type) { u32 val; if (!prev || !next) return; prev->next = next; if (type != TYPE_EVENT) { prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = cpu_to_le64(next->dma); /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); val &= ~TRB_TYPE_BITMASK; val |= TRB_TYPE(TRB_LINK); /* Always set the chain bit with 0.95 hardware */ /* Set chain bit for isoc rings on AMD 0.96 host */ if (xhci_link_trb_quirk(xhci) || (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST))) val |= TRB_CHAIN; prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); } } /* * Link the ring to the new segments. * Set Toggle Cycle for the new ring if needed. */ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, struct xhci_segment *first, struct xhci_segment *last, unsigned int num_segs) { struct xhci_segment *next; if (!ring || !first || !last) return; next = ring->enq_seg->next; xhci_link_segments(xhci, ring->enq_seg, first, ring->type); xhci_link_segments(xhci, last, next, ring->type); ring->num_segs += num_segs; ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs; if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) { ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control &= ~cpu_to_le32(LINK_TOGGLE); last->trbs[TRBS_PER_SEGMENT-1].link.control |= cpu_to_le32(LINK_TOGGLE); ring->last_seg = last; } } /* XXX: Do we need the hcd structure in all these functions? 
*/ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) { if (!ring) return; if (ring->first_seg) xhci_free_segments_for_ring(xhci, ring->first_seg); kfree(ring); } static void xhci_initialize_ring_info(struct xhci_ring *ring, unsigned int cycle_state) { /* The ring is empty, so the enqueue pointer == dequeue pointer */ ring->enqueue = ring->first_seg->trbs; ring->enq_seg = ring->first_seg; ring->dequeue = ring->enqueue; ring->deq_seg = ring->first_seg; /* The ring is initialized to 0. The producer must write 1 to the cycle * bit to handover ownership of the TRB, so PCS = 1. The consumer must * compare CCS to the cycle bit to check ownership, so CCS = 1. * * New rings are initialized with cycle state equal to 1; if we are * handling ring expansion, set the cycle state equal to the old ring. */ ring->cycle_state = cycle_state; /* Not necessary for new rings, but needed for re-initialized rings */ ring->enq_updates = 0; ring->deq_updates = 0; /* * Each segment has a link TRB, and leave an extra TRB for SW * accounting purpose */ ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; } /* Allocate segments and link them for a ring */ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, struct xhci_segment **first, struct xhci_segment **last, unsigned int num_segs, unsigned int cycle_state, enum xhci_ring_type type, gfp_t flags) { struct xhci_segment *prev; prev = xhci_segment_alloc(xhci, cycle_state, flags); if (!prev) return -ENOMEM; num_segs--; *first = prev; while (num_segs > 0) { struct xhci_segment *next; next = xhci_segment_alloc(xhci, cycle_state, flags); if (!next) { xhci_free_segments_for_ring(xhci, *first); return -ENOMEM; } xhci_link_segments(xhci, prev, next, type); prev = next; num_segs--; } xhci_link_segments(xhci, prev, *first, type); *last = prev; return 0; } /** * Create a new ring with zero or more segments. * * Link each segment together into a ring. * Set the end flag and the cycle toggle bit on the last segment. 
* See section 4.9.1 and figures 15 and 16. */ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs, unsigned int cycle_state, enum xhci_ring_type type, gfp_t flags) { struct xhci_ring *ring; int ret; ring = kzalloc(sizeof *(ring), flags); if (!ring) return NULL; ring->num_segs = num_segs; INIT_LIST_HEAD(&ring->td_list); ring->type = type; if (num_segs == 0) return ring; ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs, cycle_state, type, flags); if (ret) goto fail; /* Only event ring does not use link TRB */ if (type != TYPE_EVENT) { /* See section 4.9.2.1 and 6.4.4.1 */ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE); } xhci_initialize_ring_info(ring, cycle_state); return ring; fail: xhci_ring_free(xhci, ring); return NULL; } void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, unsigned int ep_index) { int rings_cached; rings_cached = virt_dev->num_rings_cached; if (rings_cached < XHCI_MAX_RINGS_CACHED) { virt_dev->ring_cache[rings_cached] = virt_dev->eps[ep_index].ring; virt_dev->num_rings_cached++; xhci_dbg(xhci, "Cached old ring, " "%d ring%s cached\n", virt_dev->num_rings_cached, (virt_dev->num_rings_cached > 1) ? "s" : ""); } else { xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); xhci_dbg(xhci, "Ring cache full (%d rings), " "freeing ring\n", virt_dev->num_rings_cached); } virt_dev->eps[ep_index].ring = NULL; } /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue * pointers to the beginning of the ring. 
*/ static void xhci_reinit_cached_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int cycle_state, enum xhci_ring_type type) { struct xhci_segment *seg = ring->first_seg; int i; do { memset(seg->trbs, 0, sizeof(union xhci_trb)*TRBS_PER_SEGMENT); if (cycle_state == 0) { for (i = 0; i < TRBS_PER_SEGMENT; i++) seg->trbs[i].link.control |= TRB_CYCLE; } /* All endpoint rings have link TRBs */ xhci_link_segments(xhci, seg, seg->next, type); seg = seg->next; } while (seg != ring->first_seg); ring->type = type; xhci_initialize_ring_info(ring, cycle_state); /* td list should be empty since all URBs have been cancelled, * but just in case... */ INIT_LIST_HEAD(&ring->td_list); } /* * Expand an existing ring. * Look for a cached ring or allocate a new ring which has same segment numbers * and link the two rings. */ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring, unsigned int num_trbs, gfp_t flags) { struct xhci_segment *first; struct xhci_segment *last; unsigned int num_segs; unsigned int num_segs_needed; int ret; num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) / (TRBS_PER_SEGMENT - 1); /* Allocate number of segments we needed, or double the ring size */ num_segs = ring->num_segs > num_segs_needed ? ring->num_segs : num_segs_needed; ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_segs, ring->cycle_state, ring->type, flags); if (ret) return -ENOMEM; xhci_link_rings(xhci, ring, first, last, num_segs); xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n", ring->num_segs); return 0; } #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, int type, gfp_t flags) { struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); if (!ctx) return NULL; BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); ctx->type = type; ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 
2048 : 1024; if (type == XHCI_CTX_TYPE_INPUT) ctx->size += CTX_SIZE(xhci->hcc_params); ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); memset(ctx->bytes, 0, ctx->size); return ctx; } static void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { if (!ctx) return; dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); kfree(ctx); } struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); return (struct xhci_input_control_ctx *)ctx->bytes; } struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { if (ctx->type == XHCI_CTX_TYPE_DEVICE) return (struct xhci_slot_ctx *)ctx->bytes; return (struct xhci_slot_ctx *) (ctx->bytes + CTX_SIZE(xhci->hcc_params)); } struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index) { /* increment ep index by offset of start of ep ctx array */ ep_index++; if (ctx->type == XHCI_CTX_TYPE_INPUT) ep_index++; return (struct xhci_ep_ctx *) (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); } /***************** Streams structures manipulation *************************/ static void xhci_free_stream_ctx(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) dma_free_coherent(&pdev->dev, sizeof(struct xhci_stream_ctx)*num_stream_ctxs, stream_ctx, dma); else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) return dma_pool_free(xhci->small_streams_pool, stream_ctx, dma); else return dma_pool_free(xhci->medium_streams_pool, stream_ctx, dma); } /* * The stream context array for each endpoint with bulk streams enabled can * vary in size, based on: * - how many streams the endpoint supports, * - the maximum primary stream array size the host 
controller supports, * - and how many streams the device driver asks for. * * The stream context array must be a power of 2, and can be as small as * 64 bytes or as large as 1MB. */ static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, dma_addr_t *dma, gfp_t mem_flags) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE) return dma_alloc_coherent(&pdev->dev, sizeof(struct xhci_stream_ctx)*num_stream_ctxs, dma, mem_flags); else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE) return dma_pool_alloc(xhci->small_streams_pool, mem_flags, dma); else return dma_pool_alloc(xhci->medium_streams_pool, mem_flags, dma); } struct xhci_ring *xhci_dma_to_transfer_ring( struct xhci_virt_ep *ep, u64 address) { if (ep->ep_state & EP_HAS_STREAMS) return radix_tree_lookup(&ep->stream_info->trb_address_map, address >> SEGMENT_SHIFT); return ep->ring; } /* Only use this when you know stream_info is valid */ #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING static struct xhci_ring *dma_to_stream_ring( struct xhci_stream_info *stream_info, u64 address) { return radix_tree_lookup(&stream_info->trb_address_map, address >> SEGMENT_SHIFT); } #endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ struct xhci_ring *xhci_stream_id_to_ring( struct xhci_virt_device *dev, unsigned int ep_index, unsigned int stream_id) { struct xhci_virt_ep *ep = &dev->eps[ep_index]; if (stream_id == 0) return ep->ring; if (!ep->stream_info) return NULL; if (stream_id > ep->stream_info->num_streams) return NULL; return ep->stream_info->stream_rings[stream_id]; } #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING static int xhci_test_radix_tree(struct xhci_hcd *xhci, unsigned int num_streams, struct xhci_stream_info *stream_info) { u32 cur_stream; struct xhci_ring *cur_ring; u64 addr; for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { struct xhci_ring *mapped_ring; int trb_size = sizeof(union xhci_trb); cur_ring = 
stream_info->stream_rings[cur_stream]; for (addr = cur_ring->first_seg->dma; addr < cur_ring->first_seg->dma + SEGMENT_SIZE; addr += trb_size) { mapped_ring = dma_to_stream_ring(stream_info, addr); if (cur_ring != mapped_ring) { xhci_warn(xhci, "WARN: DMA address 0x%08llx " "didn't map to stream ID %u; " "mapped to ring %p\n", (unsigned long long) addr, cur_stream, mapped_ring); return -EINVAL; } } /* One TRB after the end of the ring segment shouldn't return a * pointer to the current ring (although it may be a part of a * different ring). */ mapped_ring = dma_to_stream_ring(stream_info, addr); if (mapped_ring != cur_ring) { /* One TRB before should also fail */ addr = cur_ring->first_seg->dma - trb_size; mapped_ring = dma_to_stream_ring(stream_info, addr); } if (mapped_ring == cur_ring) { xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx " "mapped to valid stream ID %u; " "mapped ring = %p\n", (unsigned long long) addr, cur_stream, mapped_ring); return -EINVAL; } } return 0; } #endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ /* * Change an endpoint's internal structure so it supports stream IDs. The * number of requested streams includes stream 0, which cannot be used by device * drivers. * * The number of stream contexts in the stream context array may be bigger than * the number of streams the driver wants to use. This is because the number of * stream context array entries must be a power of two. * * We need a radix tree for mapping physical addresses of TRBs to which stream * ID they belong to. We need to do this because the host controller won't tell * us which stream ring the TRB came from. We could store the stream ID in an * event data TRB, but that doesn't help us for the cancellation case, since the * endpoint may stop before it reaches that event data TRB. * * The radix tree maps the upper portion of the TRB DMA address to a ring * segment that has the same upper portion of DMA addresses. 
For example, say I * have segments of size 1KB, that are always 64-byte aligned. A segment may * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the * key to the stream ID is 0x43244. I can use the DMA address of the TRB to * pass the radix tree a key to get the right stream ID: * * 0x10c90fff >> 10 = 0x43243 * 0x10c912c0 >> 10 = 0x43244 * 0x10c91400 >> 10 = 0x43245 * * Obviously, only those TRBs with DMA addresses that are within the segment * will make the radix tree return the stream ID for that ring. * * Caveats for the radix tree: * * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit * extended systems (where the DMA address can be bigger than 32-bits), * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that. */ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, unsigned int num_stream_ctxs, unsigned int num_streams, gfp_t mem_flags) { struct xhci_stream_info *stream_info; u32 cur_stream; struct xhci_ring *cur_ring; unsigned long key; u64 addr; int ret; xhci_dbg(xhci, "Allocating %u streams and %u " "stream context array entries.\n", num_streams, num_stream_ctxs); if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) { xhci_dbg(xhci, "Command ring has no reserved TRBs available\n"); return NULL; } xhci->cmd_ring_reserved_trbs++; stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags); if (!stream_info) goto cleanup_trbs; stream_info->num_streams = num_streams; stream_info->num_stream_ctxs = num_stream_ctxs; /* Initialize the array of virtual pointers to stream rings. 
*/ stream_info->stream_rings = kzalloc( sizeof(struct xhci_ring *)*num_streams, mem_flags); if (!stream_info->stream_rings) goto cleanup_info; /* Initialize the array of DMA addresses for stream rings for the HW. */ stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci, num_stream_ctxs, &stream_info->ctx_array_dma, mem_flags); if (!stream_info->stream_ctx_array) goto cleanup_ctx; memset(stream_info->stream_ctx_array, 0, sizeof(struct xhci_stream_ctx)*num_stream_ctxs); /* Allocate everything needed to free the stream rings later */ stream_info->free_streams_command = xhci_alloc_command(xhci, true, true, mem_flags); if (!stream_info->free_streams_command) goto cleanup_ctx; INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC); /* Allocate rings for all the streams that the driver will use, * and add their segment DMA addresses to the radix tree. * Stream 0 is reserved. */ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { stream_info->stream_rings[cur_stream] = xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags); cur_ring = stream_info->stream_rings[cur_stream]; if (!cur_ring) goto cleanup_rings; cur_ring->stream_id = cur_stream; /* Set deq ptr, cycle bit, and stream context type */ addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) | cur_ring->cycle_state; stream_info->stream_ctx_array[cur_stream].stream_ring = cpu_to_le64(addr); xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, (unsigned long long) addr); key = (unsigned long) (cur_ring->first_seg->dma >> SEGMENT_SHIFT); ret = radix_tree_insert(&stream_info->trb_address_map, key, cur_ring); if (ret) { xhci_ring_free(xhci, cur_ring); stream_info->stream_rings[cur_stream] = NULL; goto cleanup_rings; } } /* Leave the other unused stream ring pointers in the stream context * array initialized to zero. 
This will cause the xHC to give us an * error if the device asks for a stream ID we don't have setup (if it * was any other way, the host controller would assume the ring is * "empty" and wait forever for data to be queued to that stream ID). */ #if XHCI_DEBUG /* Do a little test on the radix tree to make sure it returns the * correct values. */ if (xhci_test_radix_tree(xhci, num_streams, stream_info)) goto cleanup_rings; #endif return stream_info; cleanup_rings: for (cur_stream = 1; cur_stream < num_streams; cur_stream++) { cur_ring = stream_info->stream_rings[cur_stream]; if (cur_ring) { addr = cur_ring->first_seg->dma; radix_tree_delete(&stream_info->trb_address_map, addr >> SEGMENT_SHIFT); xhci_ring_free(xhci, cur_ring); stream_info->stream_rings[cur_stream] = NULL; } } xhci_free_command(xhci, stream_info->free_streams_command); cleanup_ctx: kfree(stream_info->stream_rings); cleanup_info: kfree(stream_info); cleanup_trbs: xhci->cmd_ring_reserved_trbs--; return NULL; } /* * Sets the MaxPStreams field and the Linear Stream Array field. * Sets the dequeue pointer to the stream context array. */ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep_ctx, struct xhci_stream_info *stream_info) { u32 max_primary_streams; /* MaxPStreams is the number of stream context array entries, not the * number we're actually using. Must be in 2^(MaxPstreams + 1) format. * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc. */ max_primary_streams = fls(stream_info->num_stream_ctxs) - 2; xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n", 1 << (max_primary_streams + 1)); ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK); ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) | EP_HAS_LSA); ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); } /* * Sets the MaxPStreams field and the Linear Stream Array field to 0. 
* Reinstalls the "normal" endpoint ring (at its previous dequeue mark, * not at the beginning of the ring). */ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep_ctx, struct xhci_virt_ep *ep) { dma_addr_t addr; ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); } /* Frees all stream contexts associated with the endpoint, * * Caller should fix the endpoint context streams fields. */ void xhci_free_stream_info(struct xhci_hcd *xhci, struct xhci_stream_info *stream_info) { int cur_stream; struct xhci_ring *cur_ring; dma_addr_t addr; if (!stream_info) return; for (cur_stream = 1; cur_stream < stream_info->num_streams; cur_stream++) { cur_ring = stream_info->stream_rings[cur_stream]; if (cur_ring) { addr = cur_ring->first_seg->dma; radix_tree_delete(&stream_info->trb_address_map, addr >> SEGMENT_SHIFT); xhci_ring_free(xhci, cur_ring); stream_info->stream_rings[cur_stream] = NULL; } } xhci_free_command(xhci, stream_info->free_streams_command); xhci->cmd_ring_reserved_trbs--; if (stream_info->stream_ctx_array) xhci_free_stream_ctx(xhci, stream_info->num_stream_ctxs, stream_info->stream_ctx_array, stream_info->ctx_array_dma); if (stream_info) kfree(stream_info->stream_rings); kfree(stream_info); } /***************** Device context manipulation *************************/ static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, struct xhci_virt_ep *ep) { init_timer(&ep->stop_cmd_timer); ep->stop_cmd_timer.data = (unsigned long) ep; ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog; ep->xhci = xhci; } static void xhci_free_tt_info(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, int slot_id) { struct list_head *tt; struct list_head *tt_list_head; struct list_head *tt_next; struct xhci_tt_bw_info *tt_info; /* If the device never made it past the Set Address stage, * it may 
not have the real_port set correctly. */ if (virt_dev->real_port == 0 || virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { xhci_dbg(xhci, "Bad real port.\n"); return; } tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); if (list_empty(tt_list_head)) return; list_for_each(tt, tt_list_head) { tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); if (tt_info->slot_id == slot_id) break; } /* Cautionary measure in case the hub was disconnected before we * stored the TT information. */ if (tt_info->slot_id != slot_id) return; tt_next = tt->next; tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); /* Multi-TT hubs will have more than one entry */ do { list_del(tt); kfree(tt_info); tt = tt_next; if (list_empty(tt_list_head)) break; tt_next = tt->next; tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); } while (tt_info->slot_id == slot_id); } int xhci_alloc_tt_info(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags) { struct xhci_tt_bw_info *tt_info; unsigned int num_ports; int i, j; if (!tt->multi) num_ports = 1; else num_ports = hdev->maxchild; for (i = 0; i < num_ports; i++, tt_info++) { struct xhci_interval_bw_table *bw_table; tt_info = kzalloc(sizeof(*tt_info), mem_flags); if (!tt_info) goto free_tts; INIT_LIST_HEAD(&tt_info->tt_list); list_add(&tt_info->tt_list, &xhci->rh_bw[virt_dev->real_port - 1].tts); tt_info->slot_id = virt_dev->udev->slot_id; if (tt->multi) tt_info->ttport = i+1; bw_table = &tt_info->bw_table; for (j = 0; j < XHCI_MAX_INTERVAL; j++) INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); } return 0; free_tts: xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id); return -ENOMEM; } /* All the xhci_tds in the ring's TD list should be freed at this point. 
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	/* Clear the device's entry in the device context base address array */
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	/* NOTE(review): dev cannot be NULL here — the check above already
	 * returned for a NULL xhci->devs[slot_id]; this test is redundant.
	 */
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	/* Free every endpoint's transfer ring and stream state */
	for (i = 0; i < 31; ++i) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

/*
 * Allocate and initialize the xhci_virt_device for a slot: the output and
 * input device contexts, per-endpoint watchdog timers and lists, the default
 * control endpoint (EP0) ring, and the ring cache.  Returns 1 on success,
 * 0 on failure; partial allocations are unwound via xhci_free_virt_device().
 */
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);
	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
		 slot_id,
		 &xhci->dcbaa->dev_context_ptrs[slot_id],
		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

/* Copy EP0's current enqueue position into the input context dequeue field,
 * used when re-addressing a device after a reset.
 */
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_ring	*ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}

/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Scan through the xHCI roothub port array, looking for the Nth
 * entry of the correct port speed.  Return the port number of that entry.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	unsigned int num_similar_speed_ports;
	unsigned int faked_port_num;
	int i;

	/* Walk up to the device attached directly below the root hub */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	faked_port_num = top_dev->portnum;
	for (i = 0, num_similar_speed_ports = 0;
			i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub.  If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
			num_similar_speed_ports++;

		if (num_similar_speed_ports == faked_port_num)
			/* Roothub ports are numbered from 1 to N */
			return i+1;
	}
	/* 0 means "no matching root hub port found" */
	return 0;
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct xhci_slot_ctx    *slot_ctx;
	u32			port_num;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * decendent of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
		break;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 *
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	/* Clamp bInterval into the 1..16 range the format allows */
	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes. We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}

/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	/* fls(x) - 1 rounds down to the nearest power-of-two exponent */
	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}


static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	/* bInterval is in frames here; *8 converts to microframes */
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	/* Pack the exponent into the endpoint context Interval field */
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	/* Mult only applies to SuperSpeed isochronous endpoints */
	if (udev->speed != USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}

/* Map the endpoint descriptor's transfer type and direction to the xHCI
 * endpoint context EP Type field value.
 */
static u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		/* The four transfer types above are exhaustive */
		BUG();
	}
	return type;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	/* SuperSpeed companion descriptor reports the value directly */
	if (udev->speed == USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	/* Bits 12:11 of wMaxPacketSize hold the extra-transactions count */
	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}

/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	enum xhci_ring_type type;
	u32 max_esit_payload;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	type = usb_endpoint_type(&ep->desc);
	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		virt_dev->num_rings_cached--;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
					1, type);
	}
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);

	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * CErr shall be set to 0 for Isoch endpoints.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
	else
		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(0));

	ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = usb_endpoint_maxp(&ep->desc);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
		/* dig out max burst from ep companion desc */
		max_packet = ep->ss_ep_comp.bMaxBurst;
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
		break;
	case USB_SPEED_HIGH:
		/* bits 11:12 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
		    usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (usb_endpoint_maxp(&ep->desc)
				     & 0x1800) >> 11;
			ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
		break;
	default:
		BUG();
	}
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 *
	 * xHCI 1.0 specification indicates that the Average TRB Length should
	 * be set to 8 for control endpoints.
	 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
	else
		ep_ctx->tx_info |=
			 cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));

	/* FIXME Debug endpoint context */
	return 0;
}

/* Zero out the endpoint's input context fields so the hardware ignores it on
 * the next Configure Endpoint command.
 */
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

/* Reset all fields of a bandwidth-info record to "no bandwidth reserved" */
void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}

/* Refresh the cached bandwidth info for every endpoint touched by a Configure
 * Endpoint command: clear entries for dropped endpoints, and decode the input
 * context fields for added/changed periodic endpoints.
 */
void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; ++i) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}

/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	/* Nothing to do if the controller requests no scratchpad buffers */
	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);

	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	/* DCBAA entry 0 points at the scratchpad buffer array */
	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
				flags);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

 fail_sp5:
	/* Free only the buffers allocated before the failing iteration */
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

 fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

 fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);

 fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

 fail_sp:
	return -ENOMEM;
}

/* Free the scratchpad buffers, the scratchpad array, and the bookkeeping
 * allocated by scratchpad_alloc().  Safe to call when none were allocated.
 */
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(&pdev->dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(&pdev->dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

/* Allocate an xhci_command, optionally with an input context and/or a
 * completion.  Returns NULL on any allocation failure (partial allocations
 * are freed).  Caller owns the result and frees it with xhci_free_command().
 */
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			/* NOTE(review): if allocate_in_ctx was false, in_ctx
			 * is NULL here — verify xhci_free_container_ctx()
			 * tolerates a NULL context.
			 */
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}

/* Free an URB's private per-TD bookkeeping (td[0] is the single allocation
 * backing the whole td[] array here).
 */
void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
	if (urb_priv) {
		kfree(urb_priv->td[0]);
		kfree(urb_priv);
	}
}

/* Free a command allocated by xhci_alloc_command() */
void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

/* Tear down everything xhci_mem_init() set up: rings, ERST, device contexts,
 * DMA pools, scratchpad, port arrays, and LPM bookkeeping.
 */
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	struct dev_info	*dev_info, *next;
	unsigned long	flags;
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(&pdev->dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	/* Slot 0 is reserved, so start at 1 */
	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	if (xhci->small_streams_pool)
		dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg(xhci, "Freed small stream array pool\n");

	if (xhci->medium_streams_pool)
		dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg(xhci, "Freed medium stream array pool\n");

	if (xhci->dcbaa)
		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	/* Drain the LPM failed-device list under the lock */
	spin_lock_irqsave(&xhci->lock, flags);
	list_for_each_entry_safe(dev_info, next, &xhci->lpm_failed_devs, list) {
		list_del(&dev_info->list);
		kfree(dev_info);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);
	kfree(xhci->rh_bw);

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}

/* Run one TRB-in-TD lookup and compare the result against the expected
 * segment; log details and return -1 on mismatch, 0 on success.
 */
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}

/* TRB math checks for xhci_trb_in_td(), using the command and event rings.
 * NOTE(review): mem_flags is unused here.
 */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} simple_test_vector [] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma +
				(TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{	.input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

/* Write the software event ring dequeue pointer back to the hardware's
 * interrupter register set.
 */
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg(xhci, "// Write event ring dequeue pointer, "
			"preserving EHB bit\n");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}

static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, u8 major_revision)
{
	u32 temp, port_offset, port_count;
	int i;

	if (major_revision > 0x03) {
		xhci_warn(xhci, "Ignoring unknown port speed, "
				"Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand. FIXME */
		return;
	}

	/* Port offset and count in the third dword, see section 7.2 */
	temp = xhci_readl(xhci, addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
			"count = %u, revision = 0x%x\n",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are ‘1’ to MaxPorts" */
		return;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
			xhci->hw_lpm_support = 1;
		}
	}

	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
					" port %u\n", addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, "
					"duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
*/ if (xhci->port_array[i] != major_revision && xhci->port_array[i] != DUPLICATE_ENTRY) { if (xhci->port_array[i] == 0x03) xhci->num_usb3_ports--; else xhci->num_usb2_ports--; xhci->port_array[i] = DUPLICATE_ENTRY; } /* FIXME: Should we disable the port? */ continue; } xhci->port_array[i] = major_revision; if (major_revision == 0x03) xhci->num_usb3_ports++; else xhci->num_usb2_ports++; } /* FIXME: Should we disable ports not in the Extended Capabilities? */ } /* * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that * specify what speeds each port is supposed to be. We can't count on the port * speed bits in the PORTSC register being correct until a device is connected, * but we need to set up the two fake roothubs with the correct number of USB * 3.0 and USB 2.0 ports at host controller initialization time. */ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) { __le32 __iomem *addr; u32 offset; unsigned int num_ports; int i, j, port_index; addr = &xhci->cap_regs->hcc_params; offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr)); if (offset == 0) { xhci_err(xhci, "No Extended Capability registers, " "unable to set up roothub.\n"); return -ENODEV; } num_ports = HCS_MAX_PORTS(xhci->hcs_params1); xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags); if (!xhci->port_array) return -ENOMEM; xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags); if (!xhci->rh_bw) return -ENOMEM; for (i = 0; i < num_ports; i++) { struct xhci_interval_bw_table *bw_table; INIT_LIST_HEAD(&xhci->rh_bw[i].tts); bw_table = &xhci->rh_bw[i].bw_table; for (j = 0; j < XHCI_MAX_INTERVAL; j++) INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints); } /* * For whatever reason, the first capability offset is from the * capability register base, not from the HCCPARAMS register. * See section 5.3.6 for offset calculation. 
*/ addr = &xhci->cap_regs->hc_capbase + offset; while (1) { u32 cap_id; cap_id = xhci_readl(xhci, addr); if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) xhci_add_in_port(xhci, num_ports, addr, (u8) XHCI_EXT_PORT_MAJOR(cap_id)); offset = XHCI_EXT_CAPS_NEXT(cap_id); if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports) == num_ports) break; /* * Once you're into the Extended Capabilities, the offset is * always relative to the register holding the offset. */ addr += offset; } if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) { xhci_warn(xhci, "No ports on the roothubs?\n"); return -ENODEV; } xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n", xhci->num_usb2_ports, xhci->num_usb3_ports); /* Place limits on the number of roothub ports so that the hub * descriptors aren't longer than the USB core will allocate. */ if (xhci->num_usb3_ports > 15) { xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n"); xhci->num_usb3_ports = 15; } if (xhci->num_usb2_ports > USB_MAXCHILDREN) { xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n", USB_MAXCHILDREN); xhci->num_usb2_ports = USB_MAXCHILDREN; } /* * Note we could have all USB 3.0 ports, or all USB 2.0 ports. * Not sure how the USB core will handle a hub with no ports... 
*/ if (xhci->num_usb2_ports) { xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)* xhci->num_usb2_ports, flags); if (!xhci->usb2_ports) return -ENOMEM; port_index = 0; for (i = 0; i < num_ports; i++) { if (xhci->port_array[i] == 0x03 || xhci->port_array[i] == 0 || xhci->port_array[i] == DUPLICATE_ENTRY) continue; xhci->usb2_ports[port_index] = &xhci->op_regs->port_status_base + NUM_PORT_REGS*i; xhci_dbg(xhci, "USB 2.0 port at index %u, " "addr = %p\n", i, xhci->usb2_ports[port_index]); port_index++; if (port_index == xhci->num_usb2_ports) break; } } if (xhci->num_usb3_ports) { xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)* xhci->num_usb3_ports, flags); if (!xhci->usb3_ports) return -ENOMEM; port_index = 0; for (i = 0; i < num_ports; i++) if (xhci->port_array[i] == 0x03) { xhci->usb3_ports[port_index] = &xhci->op_regs->port_status_base + NUM_PORT_REGS*i; xhci_dbg(xhci, "USB 3.0 port at index %u, " "addr = %p\n", i, xhci->usb3_ports[port_index]); port_index++; if (port_index == xhci->num_usb3_ports) break; } } return 0; } int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { dma_addr_t dma; struct device *dev = xhci_to_hcd(xhci)->self.controller; unsigned int val, val2; u64 val_64; struct xhci_segment *seg; u32 page_size, temp; int i; page_size = xhci_readl(xhci, &xhci->op_regs->page_size); xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); for (i = 0; i < 16; i++) { if ((0x1 & page_size) != 0) break; page_size = page_size >> 1; } if (i < 16) xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); else xhci_warn(xhci, "WARN: no supported page size\n"); /* Use 4K pages, since that's common and the minimum the HC supports */ xhci->page_shift = 12; xhci->page_size = 1 << xhci->page_shift; xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); /* * Program the Number of Device Slots Enabled field in the CONFIG * register with the max value of slots the HC can handle. 
*/ val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", (unsigned int) val); val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); val |= (val2 & ~HCS_SLOTS_MASK); xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", (unsigned int) val); xhci_writel(xhci, val, &xhci->op_regs->config_reg); /* * Section 5.4.8 - doorbell array must be * "physically contiguous and 64-byte (cache line) aligned". */ xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, GFP_KERNEL); if (!xhci->dcbaa) goto fail; memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); xhci->dcbaa->dma = dma; xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); /* * Initialize the ring segment pool. The ring must be a contiguous * structure comprised of TRBs. The TRBs must be 16 byte aligned, * however, the command ring segment needs 64-byte aligned segments, * so we pick the greater alignment need. */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, SEGMENT_SIZE, 64, xhci->page_size); /* See Table 46 and Note on Figure 55 */ xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2112, 64, xhci->page_size); if (!xhci->segment_pool || !xhci->device_pool) goto fail; /* Linear stream context arrays don't have any boundary restrictions, * and only need to be 16-byte aligned. */ xhci->small_streams_pool = dma_pool_create("xHCI 256 byte stream ctx arrays", dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); xhci->medium_streams_pool = dma_pool_create("xHCI 1KB stream ctx arrays", dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE * will be allocated with dma_alloc_coherent() */ if (!xhci->small_streams_pool || !xhci->medium_streams_pool) goto fail; /* Set up the command ring to have one segments for now. 
*/ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags); if (!xhci->cmd_ring) goto fail; xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); xhci_dbg(xhci, "First segment DMA is 0x%llx\n", (unsigned long long)xhci->cmd_ring->first_seg->dma); /* Set the address in the Command Ring Control register */ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | xhci->cmd_ring->cycle_state; xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val); xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); xhci_dbg_cmd_ptrs(xhci); val = xhci_readl(xhci, &xhci->cap_regs->db_off); val &= DBOFF_MASK; xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" " from cap regs base addr\n", val); xhci->dba = (void __iomem *) xhci->cap_regs + val; xhci_dbg_regs(xhci); xhci_print_run_regs(xhci); /* Set ir_set to interrupt register set 0 */ xhci->ir_set = &xhci->run_regs->ir_set[0]; /* * Event ring setup: Allocate a normal ring, but also setup * the event ring segment table (ERST). Section 4.9.3. 
*/ xhci_dbg(xhci, "// Allocating event ring\n"); xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, flags); if (!xhci->event_ring) goto fail; if (xhci_check_trb_in_td_math(xhci, flags) < 0) goto fail; xhci->erst.entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, GFP_KERNEL); if (!xhci->erst.entries) goto fail; xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", (unsigned long long)dma); memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); xhci->erst.num_entries = ERST_NUM_SEGS; xhci->erst.erst_dma_addr = dma; xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n", xhci->erst.num_entries, xhci->erst.entries, (unsigned long long)xhci->erst.erst_dma_addr); /* set ring base address and size for each segment table entry */ for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { struct xhci_erst_entry *entry = &xhci->erst.entries[val]; entry->seg_addr = cpu_to_le64(seg->dma); entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT); entry->rsvd = 0; seg = seg->next; } /* set ERST count with the number of entries in the segment table */ val = xhci_readl(xhci, &xhci->ir_set->erst_size); val &= ERST_SIZE_MASK; val |= ERST_NUM_SEGS; xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", val); xhci_writel(xhci, val, &xhci->ir_set->erst_size); xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); /* set the segment table base address */ xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", (unsigned long long)xhci->erst.erst_dma_addr); val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); val_64 &= ERST_PTR_MASK; val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); /* Set the event ring dequeue address */ xhci_set_hc_event_deq(xhci); xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); xhci_print_ir_set(xhci, 0); /* * XXX: 
Might need to set the Interrupter Moderation Register to * something other than the default (~1ms minimum between interrupts). * See section 5.5.1.2. */ init_completion(&xhci->addr_dev); for (i = 0; i < MAX_HC_SLOTS; ++i) xhci->devs[i] = NULL; for (i = 0; i < USB_MAXCHILDREN; ++i) { xhci->bus_state[0].resume_done[i] = 0; xhci->bus_state[1].resume_done[i] = 0; } if (scratchpad_alloc(xhci, flags)) goto fail; if (xhci_setup_port_arrays(xhci, flags)) goto fail; INIT_LIST_HEAD(&xhci->lpm_failed_devs); /* Enable USB 3.0 device notifications for function remote wake, which * is necessary for allowing USB 3.0 devices to do remote wakeup from * U3 (device suspend). */ temp = xhci_readl(xhci, &xhci->op_regs->dev_notification); temp &= ~DEV_NOTE_MASK; temp |= DEV_NOTE_FWAKE; xhci_writel(xhci, temp, &xhci->op_regs->dev_notification); return 0; fail: xhci_warn(xhci, "Couldn't initialize memory\n"); xhci_halt(xhci); xhci_reset(xhci); xhci_mem_cleanup(xhci); return -ENOMEM; }
gpl-2.0
KyLinOS/android_kernel_motorola_omap4-common
arch/arm/mach-s3c2440/mach-rx1950.c
2823
19400
/* linux/arch/arm/mach-s3c2440/mach-rx1950.c * * Copyright (c) 2006-2009 Victor Chukhantsev, Denis Grigoriev, * Copyright (c) 2007-2010 Vasily Khoruzhick * * based on smdk2440 written by Ben Dooks * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/memblock.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/sysdev.h> #include <linux/pda_power.h> #include <linux/pwm_backlight.h> #include <linux/pwm.h> #include <linux/s3c_adc_battery.h> #include <linux/leds.h> #include <linux/i2c.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mmc/host.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach-types.h> #include <mach/regs-gpio.h> #include <mach/regs-gpioj.h> #include <mach/h1940.h> #include <mach/fb.h> #include <plat/clock.h> #include <plat/regs-serial.h> #include <plat/regs-iic.h> #include <plat/mci.h> #include <plat/udc.h> #include <plat/nand.h> #include <plat/iic.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/pm.h> #include <plat/irq.h> #include <plat/ts.h> #include <sound/uda1380.h> #define LCD_PWM_PERIOD 192960 #define LCD_PWM_DUTY 127353 static struct map_desc rx1950_iodesc[] __initdata = { }; static struct s3c24xx_uart_clksrc rx1950_serial_clocks[] = { [0] = { .name = "fclk", .divisor = 0x0a, .min_baud = 0, .max_baud = 0, }, }; static struct s3c2410_uartcfg rx1950_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, .clocks = rx1950_serial_clocks, .clocks_size = ARRAY_SIZE(rx1950_serial_clocks), }, [1] = { 
.hwport = 1, .flags = 0, .ucon = 0x3c5, .ulcon = 0x03, .ufcon = 0x51, .clocks = rx1950_serial_clocks, .clocks_size = ARRAY_SIZE(rx1950_serial_clocks), }, /* IR port */ [2] = { .hwport = 2, .flags = 0, .ucon = 0x3c5, .ulcon = 0x43, .ufcon = 0xf1, .clocks = rx1950_serial_clocks, .clocks_size = ARRAY_SIZE(rx1950_serial_clocks), }, }; static struct s3c2410fb_display rx1950_display = { .type = S3C2410_LCDCON1_TFT, .width = 240, .height = 320, .xres = 240, .yres = 320, .bpp = 16, .pixclock = 260000, .left_margin = 10, .right_margin = 20, .hsync_len = 10, .upper_margin = 2, .lower_margin = 2, .vsync_len = 2, .lcdcon5 = S3C2410_LCDCON5_FRM565 | S3C2410_LCDCON5_INVVCLK | S3C2410_LCDCON5_INVVLINE | S3C2410_LCDCON5_INVVFRAME | S3C2410_LCDCON5_HWSWP | (0x02 << 13) | (0x02 << 15), }; static int power_supply_init(struct device *dev) { return gpio_request(S3C2410_GPF(2), "cable plugged"); } static int rx1950_is_ac_online(void) { return !gpio_get_value(S3C2410_GPF(2)); } static void power_supply_exit(struct device *dev) { gpio_free(S3C2410_GPF(2)); } static char *rx1950_supplicants[] = { "main-battery" }; static struct pda_power_pdata power_supply_info = { .init = power_supply_init, .is_ac_online = rx1950_is_ac_online, .exit = power_supply_exit, .supplied_to = rx1950_supplicants, .num_supplicants = ARRAY_SIZE(rx1950_supplicants), }; static struct resource power_supply_resources[] = { [0] = { .name = "ac", .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE, .start = IRQ_EINT2, .end = IRQ_EINT2, }, }; static struct platform_device power_supply = { .name = "pda-power", .id = -1, .dev = { .platform_data = &power_supply_info, }, .resource = power_supply_resources, .num_resources = ARRAY_SIZE(power_supply_resources), }; static const struct s3c_adc_bat_thresh bat_lut_noac[] = { { .volt = 4100, .cur = 156, .level = 100}, { .volt = 4050, .cur = 156, .level = 95}, { .volt = 4025, .cur = 141, .level = 90}, { .volt = 3995, .cur = 144, .level = 85}, { .volt = 3957, .cur 
= 162, .level = 80}, { .volt = 3931, .cur = 147, .level = 75}, { .volt = 3902, .cur = 147, .level = 70}, { .volt = 3863, .cur = 153, .level = 65}, { .volt = 3838, .cur = 150, .level = 60}, { .volt = 3800, .cur = 153, .level = 55}, { .volt = 3765, .cur = 153, .level = 50}, { .volt = 3748, .cur = 172, .level = 45}, { .volt = 3740, .cur = 153, .level = 40}, { .volt = 3714, .cur = 175, .level = 35}, { .volt = 3710, .cur = 156, .level = 30}, { .volt = 3963, .cur = 156, .level = 25}, { .volt = 3672, .cur = 178, .level = 20}, { .volt = 3651, .cur = 178, .level = 15}, { .volt = 3629, .cur = 178, .level = 10}, { .volt = 3612, .cur = 162, .level = 5}, { .volt = 3605, .cur = 162, .level = 0}, }; static const struct s3c_adc_bat_thresh bat_lut_acin[] = { { .volt = 4200, .cur = 0, .level = 100}, { .volt = 4190, .cur = 0, .level = 99}, { .volt = 4178, .cur = 0, .level = 95}, { .volt = 4110, .cur = 0, .level = 70}, { .volt = 4076, .cur = 0, .level = 65}, { .volt = 4046, .cur = 0, .level = 60}, { .volt = 4021, .cur = 0, .level = 55}, { .volt = 3999, .cur = 0, .level = 50}, { .volt = 3982, .cur = 0, .level = 45}, { .volt = 3965, .cur = 0, .level = 40}, { .volt = 3957, .cur = 0, .level = 35}, { .volt = 3948, .cur = 0, .level = 30}, { .volt = 3936, .cur = 0, .level = 25}, { .volt = 3927, .cur = 0, .level = 20}, { .volt = 3906, .cur = 0, .level = 15}, { .volt = 3880, .cur = 0, .level = 10}, { .volt = 3829, .cur = 0, .level = 5}, { .volt = 3820, .cur = 0, .level = 0}, }; int rx1950_bat_init(void) { int ret; ret = gpio_request(S3C2410_GPJ(2), "rx1950-charger-enable-1"); if (ret) goto err_gpio1; ret = gpio_request(S3C2410_GPJ(3), "rx1950-charger-enable-2"); if (ret) goto err_gpio2; return 0; err_gpio2: gpio_free(S3C2410_GPJ(2)); err_gpio1: return ret; } void rx1950_bat_exit(void) { gpio_free(S3C2410_GPJ(2)); gpio_free(S3C2410_GPJ(3)); } void rx1950_enable_charger(void) { gpio_direction_output(S3C2410_GPJ(2), 1); gpio_direction_output(S3C2410_GPJ(3), 1); } void rx1950_disable_charger(void) 
{ gpio_direction_output(S3C2410_GPJ(2), 0); gpio_direction_output(S3C2410_GPJ(3), 0); } DEFINE_SPINLOCK(rx1950_blink_spin); static int rx1950_led_blink_set(unsigned gpio, int state, unsigned long *delay_on, unsigned long *delay_off) { int blink_gpio, check_gpio; switch (gpio) { case S3C2410_GPA(6): blink_gpio = S3C2410_GPA(4); check_gpio = S3C2410_GPA(3); break; case S3C2410_GPA(7): blink_gpio = S3C2410_GPA(3); check_gpio = S3C2410_GPA(4); break; default: return -EINVAL; break; } if (delay_on && delay_off && !*delay_on && !*delay_off) *delay_on = *delay_off = 500; spin_lock(&rx1950_blink_spin); switch (state) { case GPIO_LED_NO_BLINK_LOW: case GPIO_LED_NO_BLINK_HIGH: if (!gpio_get_value(check_gpio)) gpio_set_value(S3C2410_GPJ(6), 0); gpio_set_value(blink_gpio, 0); gpio_set_value(gpio, state); break; case GPIO_LED_BLINK: gpio_set_value(gpio, 0); gpio_set_value(S3C2410_GPJ(6), 1); gpio_set_value(blink_gpio, 1); break; } spin_unlock(&rx1950_blink_spin); return 0; } static struct gpio_led rx1950_leds_desc[] = { { .name = "Green", .default_trigger = "main-battery-full", .gpio = S3C2410_GPA(6), .retain_state_suspended = 1, }, { .name = "Red", .default_trigger = "main-battery-charging-blink-full-solid", .gpio = S3C2410_GPA(7), .retain_state_suspended = 1, }, { .name = "Blue", .default_trigger = "rx1950-acx-mem", .gpio = S3C2410_GPA(11), .retain_state_suspended = 1, }, }; static struct gpio_led_platform_data rx1950_leds_pdata = { .num_leds = ARRAY_SIZE(rx1950_leds_desc), .leds = rx1950_leds_desc, .gpio_blink_set = rx1950_led_blink_set, }; static struct platform_device rx1950_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &rx1950_leds_pdata, }, }; static struct s3c_adc_bat_pdata rx1950_bat_cfg = { .init = rx1950_bat_init, .exit = rx1950_bat_exit, .enable_charger = rx1950_enable_charger, .disable_charger = rx1950_disable_charger, .gpio_charge_finished = S3C2410_GPF(3), .lut_noac = bat_lut_noac, .lut_noac_cnt = ARRAY_SIZE(bat_lut_noac), .lut_acin = 
bat_lut_acin, .lut_acin_cnt = ARRAY_SIZE(bat_lut_acin), .volt_channel = 0, .current_channel = 1, .volt_mult = 4235, .current_mult = 2900, .internal_impedance = 200, }; static struct platform_device rx1950_battery = { .name = "s3c-adc-battery", .id = -1, .dev = { .parent = &s3c_device_adc.dev, .platform_data = &rx1950_bat_cfg, }, }; static struct s3c2410fb_mach_info rx1950_lcd_cfg = { .displays = &rx1950_display, .num_displays = 1, .default_display = 0, .lpcsel = 0x02, .gpccon = 0xaa9556a9, .gpccon_mask = 0xffc003fc, .gpcup = 0x0000ffff, .gpcup_mask = 0xffffffff, .gpdcon = 0xaa90aaa1, .gpdcon_mask = 0xffc0fff0, .gpdup = 0x0000fcfd, .gpdup_mask = 0xffffffff, }; static struct pwm_device *lcd_pwm; void rx1950_lcd_power(int enable) { int i; static int enabled; if (enabled == enable) return; if (!enable) { /* GPC11-GPC15->OUTPUT */ for (i = 11; i < 16; i++) gpio_direction_output(S3C2410_GPC(i), 1); /* Wait a bit here... */ mdelay(100); /* GPD2-GPD7->OUTPUT */ /* GPD11-GPD15->OUTPUT */ /* GPD2-GPD7->1, GPD11-GPD15->1 */ for (i = 2; i < 8; i++) gpio_direction_output(S3C2410_GPD(i), 1); for (i = 11; i < 16; i++) gpio_direction_output(S3C2410_GPD(i), 1); /* Wait a bit here...*/ mdelay(100); /* GPB0->OUTPUT, GPB0->0 */ gpio_direction_output(S3C2410_GPB(0), 0); /* GPC1-GPC4->OUTPUT, GPC1-4->0 */ for (i = 1; i < 5; i++) gpio_direction_output(S3C2410_GPC(i), 0); /* GPC15-GPC11->0 */ for (i = 11; i < 16; i++) gpio_direction_output(S3C2410_GPC(i), 0); /* GPD15-GPD11->0, GPD2->GPD7->0 */ for (i = 11; i < 16; i++) gpio_direction_output(S3C2410_GPD(i), 0); for (i = 2; i < 8; i++) gpio_direction_output(S3C2410_GPD(i), 0); /* GPC6->0, GPC7->0, GPC5->0 */ gpio_direction_output(S3C2410_GPC(6), 0); gpio_direction_output(S3C2410_GPC(7), 0); gpio_direction_output(S3C2410_GPC(5), 0); /* GPB1->OUTPUT, GPB1->0 */ gpio_direction_output(S3C2410_GPB(1), 0); pwm_config(lcd_pwm, 0, LCD_PWM_PERIOD); pwm_disable(lcd_pwm); /* GPC0->0, GPC10->0 */ gpio_direction_output(S3C2410_GPC(0), 0); 
gpio_direction_output(S3C2410_GPC(10), 0); } else { pwm_config(lcd_pwm, LCD_PWM_DUTY, LCD_PWM_PERIOD); pwm_enable(lcd_pwm); gpio_direction_output(S3C2410_GPC(0), 1); gpio_direction_output(S3C2410_GPC(5), 1); s3c_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPB1_TOUT1); gpio_direction_output(S3C2410_GPC(7), 1); for (i = 1; i < 5; i++) s3c_gpio_cfgpin(S3C2410_GPC(i), S3C_GPIO_SFN(2)); for (i = 11; i < 16; i++) s3c_gpio_cfgpin(S3C2410_GPC(i), S3C_GPIO_SFN(2)); for (i = 2; i < 8; i++) s3c_gpio_cfgpin(S3C2410_GPD(i), S3C_GPIO_SFN(2)); for (i = 11; i < 16; i++) s3c_gpio_cfgpin(S3C2410_GPD(i), S3C_GPIO_SFN(2)); gpio_direction_output(S3C2410_GPC(10), 1); gpio_direction_output(S3C2410_GPC(6), 1); } enabled = enable; } static void rx1950_bl_power(int enable) { static int enabled; if (enabled == enable) return; if (!enable) { gpio_direction_output(S3C2410_GPB(0), 0); } else { /* LED driver need a "push" to power on */ gpio_direction_output(S3C2410_GPB(0), 1); /* Warm up backlight for one period of PWM. * Without this trick its almost impossible to * enable backlight with low brightness value */ ndelay(48000); s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPB0_TOUT0); } enabled = enable; } static int rx1950_backlight_init(struct device *dev) { WARN_ON(gpio_request(S3C2410_GPB(0), "Backlight")); lcd_pwm = pwm_request(1, "RX1950 LCD"); if (IS_ERR(lcd_pwm)) { dev_err(dev, "Unable to request PWM for LCD power!\n"); return PTR_ERR(lcd_pwm); } rx1950_lcd_power(1); rx1950_bl_power(1); return 0; } static void rx1950_backlight_exit(struct device *dev) { rx1950_bl_power(0); rx1950_lcd_power(0); pwm_free(lcd_pwm); gpio_free(S3C2410_GPB(0)); } static int rx1950_backlight_notify(struct device *dev, int brightness) { if (!brightness) { rx1950_bl_power(0); rx1950_lcd_power(0); } else { rx1950_lcd_power(1); rx1950_bl_power(1); } return brightness; } static struct platform_pwm_backlight_data rx1950_backlight_data = { .pwm_id = 0, .max_brightness = 24, .dft_brightness = 4, .pwm_period_ns = 48000, .init = 
rx1950_backlight_init, .notify = rx1950_backlight_notify, .exit = rx1950_backlight_exit, }; static struct platform_device rx1950_backlight = { .name = "pwm-backlight", .dev = { .parent = &s3c_device_timer[0].dev, .platform_data = &rx1950_backlight_data, }, }; static void rx1950_set_mmc_power(unsigned char power_mode, unsigned short vdd) { switch (power_mode) { case MMC_POWER_OFF: gpio_direction_output(S3C2410_GPJ(1), 0); break; case MMC_POWER_UP: case MMC_POWER_ON: gpio_direction_output(S3C2410_GPJ(1), 1); break; default: break; } } static struct s3c24xx_mci_pdata rx1950_mmc_cfg __initdata = { .gpio_detect = S3C2410_GPF(5), .gpio_wprotect = S3C2410_GPH(8), .set_power = rx1950_set_mmc_power, .ocr_avail = MMC_VDD_32_33, }; static struct mtd_partition rx1950_nand_part[] = { [0] = { .name = "Boot0", .offset = 0, .size = 0x4000, .mask_flags = MTD_WRITEABLE, }, [1] = { .name = "Boot1", .offset = MTDPART_OFS_APPEND, .size = 0x40000, .mask_flags = MTD_WRITEABLE, }, [2] = { .name = "Kernel", .offset = MTDPART_OFS_APPEND, .size = 0x300000, .mask_flags = 0, }, [3] = { .name = "Filesystem", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, .mask_flags = 0, }, }; static struct s3c2410_nand_set rx1950_nand_sets[] = { [0] = { .name = "Internal", .nr_chips = 1, .nr_partitions = ARRAY_SIZE(rx1950_nand_part), .partitions = rx1950_nand_part, }, }; static struct s3c2410_platform_nand rx1950_nand_info = { .tacls = 25, .twrph0 = 50, .twrph1 = 15, .nr_sets = ARRAY_SIZE(rx1950_nand_sets), .sets = rx1950_nand_sets, }; static struct s3c2410_udc_mach_info rx1950_udc_cfg __initdata = { .vbus_pin = S3C2410_GPG(5), .vbus_pin_inverted = 1, .pullup_pin = S3C2410_GPJ(5), }; static struct s3c2410_ts_mach_info rx1950_ts_cfg __initdata = { .delay = 10000, .presc = 49, .oversampling_shift = 3, }; static struct gpio_keys_button rx1950_gpio_keys_table[] = { { .code = KEY_POWER, .gpio = S3C2410_GPF(0), .active_low = 1, .desc = "Power button", .wakeup = 1, }, { .code = KEY_F5, .gpio = 
S3C2410_GPF(7), .active_low = 1, .desc = "Record button", }, { .code = KEY_F1, .gpio = S3C2410_GPG(0), .active_low = 1, .desc = "Calendar button", }, { .code = KEY_F2, .gpio = S3C2410_GPG(2), .active_low = 1, .desc = "Contacts button", }, { .code = KEY_F3, .gpio = S3C2410_GPG(3), .active_low = 1, .desc = "Mail button", }, { .code = KEY_F4, .gpio = S3C2410_GPG(7), .active_low = 1, .desc = "WLAN button", }, { .code = KEY_LEFT, .gpio = S3C2410_GPG(10), .active_low = 1, .desc = "Left button", }, { .code = KEY_RIGHT, .gpio = S3C2410_GPG(11), .active_low = 1, .desc = "Right button", }, { .code = KEY_UP, .gpio = S3C2410_GPG(4), .active_low = 1, .desc = "Up button", }, { .code = KEY_DOWN, .gpio = S3C2410_GPG(6), .active_low = 1, .desc = "Down button", }, { .code = KEY_ENTER, .gpio = S3C2410_GPG(9), .active_low = 1, .desc = "Ok button" }, }; static struct gpio_keys_platform_data rx1950_gpio_keys_data = { .buttons = rx1950_gpio_keys_table, .nbuttons = ARRAY_SIZE(rx1950_gpio_keys_table), }; static struct platform_device rx1950_device_gpiokeys = { .name = "gpio-keys", .dev.platform_data = &rx1950_gpio_keys_data, }; static struct uda1380_platform_data uda1380_info = { .gpio_power = S3C2410_GPJ(0), .gpio_reset = S3C2410_GPD(0), .dac_clk = UDA1380_DAC_CLK_SYSCLK, }; static struct i2c_board_info rx1950_i2c_devices[] = { { I2C_BOARD_INFO("uda1380", 0x1a), .platform_data = &uda1380_info, }, }; static struct platform_device *rx1950_devices[] __initdata = { &s3c_device_lcd, &s3c_device_wdt, &s3c_device_i2c0, &s3c_device_iis, &samsung_asoc_dma, &s3c_device_usbgadget, &s3c_device_rtc, &s3c_device_nand, &s3c_device_sdi, &s3c_device_adc, &s3c_device_ts, &s3c_device_timer[0], &s3c_device_timer[1], &rx1950_backlight, &rx1950_device_gpiokeys, &power_supply, &rx1950_battery, &rx1950_leds, }; static struct clk *rx1950_clocks[] __initdata = { &s3c24xx_clkout0, &s3c24xx_clkout1, }; static void __init rx1950_map_io(void) { s3c24xx_clkout0.parent = &clk_h; s3c24xx_clkout1.parent = &clk_f; 
s3c24xx_register_clocks(rx1950_clocks, ARRAY_SIZE(rx1950_clocks)); s3c24xx_init_io(rx1950_iodesc, ARRAY_SIZE(rx1950_iodesc)); s3c24xx_init_clocks(16934000); s3c24xx_init_uarts(rx1950_uartcfgs, ARRAY_SIZE(rx1950_uartcfgs)); /* setup PM */ #ifdef CONFIG_PM_H1940 memcpy(phys_to_virt(H1940_SUSPEND_RESUMEAT), h1940_pm_return, 8); #endif s3c_pm_init(); } static void __init rx1950_init_machine(void) { int i; s3c24xx_fb_set_platdata(&rx1950_lcd_cfg); s3c24xx_udc_set_platdata(&rx1950_udc_cfg); s3c24xx_ts_set_platdata(&rx1950_ts_cfg); s3c24xx_mci_set_platdata(&rx1950_mmc_cfg); s3c_i2c0_set_platdata(NULL); s3c_nand_set_platdata(&rx1950_nand_info); /* Turn off suspend on both USB ports, and switch the * selectable USB port to USB device mode. */ s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST | S3C2410_MISCCR_USBSUSPND0 | S3C2410_MISCCR_USBSUSPND1, 0x0); /* mmc power is disabled by default */ WARN_ON(gpio_request(S3C2410_GPJ(1), "MMC power")); gpio_direction_output(S3C2410_GPJ(1), 0); for (i = 0; i < 8; i++) WARN_ON(gpio_request(S3C2410_GPC(i), "LCD power")); for (i = 10; i < 16; i++) WARN_ON(gpio_request(S3C2410_GPC(i), "LCD power")); for (i = 2; i < 8; i++) WARN_ON(gpio_request(S3C2410_GPD(i), "LCD power")); for (i = 11; i < 16; i++) WARN_ON(gpio_request(S3C2410_GPD(i), "LCD power")); WARN_ON(gpio_request(S3C2410_GPB(1), "LCD power")); WARN_ON(gpio_request(S3C2410_GPA(3), "Red blink")); WARN_ON(gpio_request(S3C2410_GPA(4), "Green blink")); WARN_ON(gpio_request(S3C2410_GPJ(6), "LED blink")); gpio_direction_output(S3C2410_GPA(3), 0); gpio_direction_output(S3C2410_GPA(4), 0); gpio_direction_output(S3C2410_GPJ(6), 0); platform_add_devices(rx1950_devices, ARRAY_SIZE(rx1950_devices)); i2c_register_board_info(0, rx1950_i2c_devices, ARRAY_SIZE(rx1950_i2c_devices)); } /* H1940 and RX3715 need to reserve this for suspend */ static void __init rx1950_reserve(void) { memblock_reserve(0x30003000, 0x1000); memblock_reserve(0x30081000, 0x1000); } MACHINE_START(RX1950, "HP iPAQ RX1950") /* 
Maintainers: Vasily Khoruzhick */ .boot_params = S3C2410_SDRAM_PA + 0x100, .map_io = rx1950_map_io, .reserve = rx1950_reserve, .init_irq = s3c24xx_init_irq, .init_machine = rx1950_init_machine, .timer = &s3c24xx_timer, MACHINE_END
gpl-2.0
UberSlim/KernelSanders_L90
net/ipv4/ipmr.c
3079
58755
/* * IP multicast routing support for mrouted 3.6/3.8 * * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk> * Linux Consultancy and Custom Driver Development * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: * Michael Chastain : Incorrect size of copying. * Alan Cox : Added the cache manager code * Alan Cox : Fixed the clone/copy bug and device race. * Mike McLagan : Routing by source * Malcolm Beattie : Buffer handling fixes. * Alexey Kuznetsov : Double buffer free and other fixes. * SVR Anand : Fixed several multicast bugs and problems. * Alexey Kuznetsov : Status, optimisations and more. * Brad Parker : Better behaviour on mrouted upcall * overflow. * Carlos Picoto : PIMv1 Support * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header * Relax this requirement to work with older peers. 
* */ #include <asm/uaccess.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mroute.h> #include <linux/init.h> #include <linux/if_ether.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <net/route.h> #include <net/sock.h> #include <net/icmp.h> #include <net/udp.h> #include <net/raw.h> #include <linux/notifier.h> #include <linux/if_arp.h> #include <linux/netfilter_ipv4.h> #include <linux/compat.h> #include <linux/export.h> #include <net/ipip.h> #include <net/checksum.h> #include <net/netlink.h> #include <net/fib_rules.h> #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) #define CONFIG_IP_PIMSM 1 #endif struct mr_table { struct list_head list; #ifdef CONFIG_NET_NS struct net *net; #endif u32 id; struct sock __rcu *mroute_sk; struct timer_list ipmr_expire_timer; struct list_head mfc_unres_queue; struct list_head mfc_cache_array[MFC_LINES]; struct vif_device vif_table[MAXVIFS]; int maxvif; atomic_t cache_resolve_queue_len; int mroute_do_assert; int mroute_do_pim; #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) int mroute_reg_vif_num; #endif }; struct ipmr_rule { struct fib_rule common; }; struct ipmr_result { struct mr_table *mrt; }; /* Big lock, protecting vif table, mrt cache and mroute socket state. * Note that the changes are semaphored via rtnl_lock. 
*/ static DEFINE_RWLOCK(mrt_lock); /* * Multicast router control variables */ #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL) /* Special spinlock for queue of unresolved entries */ static DEFINE_SPINLOCK(mfc_unres_lock); /* We return to original Alan's scheme. Hash table of resolved * entries is changed only in process context and protected * with weak lock mrt_lock. Queue of unresolved entries is protected * with strong spinlock mfc_unres_lock. * * In this case data path is free of exclusive locks at all. */ static struct kmem_cache *mrt_cachep __read_mostly; static struct mr_table *ipmr_new_table(struct net *net, u32 id); static int ip_mr_forward(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *cache, int local); static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); static void ipmr_expire_process(unsigned long arg); #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES #define ipmr_for_each_table(mrt, net) \ list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list) static struct mr_table *ipmr_get_table(struct net *net, u32 id) { struct mr_table *mrt; ipmr_for_each_table(mrt, net) { if (mrt->id == id) return mrt; } return NULL; } static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { struct ipmr_result res; struct fib_lookup_arg arg = { .result = &res, }; int err; err = fib_rules_lookup(net->ipv4.mr_rules_ops, flowi4_to_flowi(flp4), 0, &arg); if (err < 0) return err; *mrt = res.mrt; return 0; } static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, int flags, struct fib_lookup_arg *arg) { struct ipmr_result *res = arg->result; struct mr_table *mrt; switch (rule->action) { case FR_ACT_TO_TBL: break; case FR_ACT_UNREACHABLE: return -ENETUNREACH; case FR_ACT_PROHIBIT: return -EACCES; case FR_ACT_BLACKHOLE: default: return 
-EINVAL; } mrt = ipmr_get_table(rule->fr_net, rule->table); if (mrt == NULL) return -EAGAIN; res->mrt = mrt; return 0; } static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) { return 1; } static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { FRA_GENERIC_POLICY, }; static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb) { return 0; } static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, struct nlattr **tb) { return 1; } static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh) { frh->dst_len = 0; frh->src_len = 0; frh->tos = 0; return 0; } static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = { .family = RTNL_FAMILY_IPMR, .rule_size = sizeof(struct ipmr_rule), .addr_size = sizeof(u32), .action = ipmr_rule_action, .match = ipmr_rule_match, .configure = ipmr_rule_configure, .compare = ipmr_rule_compare, .default_pref = fib_default_rule_pref, .fill = ipmr_rule_fill, .nlgroup = RTNLGRP_IPV4_RULE, .policy = ipmr_rule_policy, .owner = THIS_MODULE, }; static int __net_init ipmr_rules_init(struct net *net) { struct fib_rules_ops *ops; struct mr_table *mrt; int err; ops = fib_rules_register(&ipmr_rules_ops_template, net); if (IS_ERR(ops)) return PTR_ERR(ops); INIT_LIST_HEAD(&net->ipv4.mr_tables); mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); if (mrt == NULL) { err = -ENOMEM; goto err1; } err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0); if (err < 0) goto err2; net->ipv4.mr_rules_ops = ops; return 0; err2: kfree(mrt); err1: fib_rules_unregister(ops); return err; } static void __net_exit ipmr_rules_exit(struct net *net) { struct mr_table *mrt, *next; list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { list_del(&mrt->list); kfree(mrt); } fib_rules_unregister(net->ipv4.mr_rules_ops); } #else #define ipmr_for_each_table(mrt, net) \ for (mrt = net->ipv4.mrt; mrt; mrt = NULL) 
static struct mr_table *ipmr_get_table(struct net *net, u32 id) { return net->ipv4.mrt; } static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { *mrt = net->ipv4.mrt; return 0; } static int __net_init ipmr_rules_init(struct net *net) { net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); return net->ipv4.mrt ? 0 : -ENOMEM; } static void __net_exit ipmr_rules_exit(struct net *net) { kfree(net->ipv4.mrt); } #endif static struct mr_table *ipmr_new_table(struct net *net, u32 id) { struct mr_table *mrt; unsigned int i; mrt = ipmr_get_table(net, id); if (mrt != NULL) return mrt; mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); if (mrt == NULL) return NULL; write_pnet(&mrt->net, net); mrt->id = id; /* Forwarding cache */ for (i = 0; i < MFC_LINES; i++) INIT_LIST_HEAD(&mrt->mfc_cache_array[i]); INIT_LIST_HEAD(&mrt->mfc_unres_queue); setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, (unsigned long)mrt); #ifdef CONFIG_IP_PIMSM mrt->mroute_reg_vif_num = -1; #endif #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); #endif return mrt; } /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) { struct net *net = dev_net(dev); dev_close(dev); dev = __dev_get_by_name(net, "tunl0"); if (dev) { const struct net_device_ops *ops = dev->netdev_ops; struct ifreq ifr; struct ip_tunnel_parm p; memset(&p, 0, sizeof(p)); p.iph.daddr = v->vifc_rmt_addr.s_addr; p.iph.saddr = v->vifc_lcl_addr.s_addr; p.iph.version = 4; p.iph.ihl = 5; p.iph.protocol = IPPROTO_IPIP; sprintf(p.name, "dvmrp%d", v->vifc_vifi); ifr.ifr_ifru.ifru_data = (__force void __user *)&p; if (ops->ndo_do_ioctl) { mm_segment_t oldfs = get_fs(); set_fs(KERNEL_DS); ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL); set_fs(oldfs); } } } static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) { struct net_device *dev; dev = __dev_get_by_name(net, 
"tunl0"); if (dev) { const struct net_device_ops *ops = dev->netdev_ops; int err; struct ifreq ifr; struct ip_tunnel_parm p; struct in_device *in_dev; memset(&p, 0, sizeof(p)); p.iph.daddr = v->vifc_rmt_addr.s_addr; p.iph.saddr = v->vifc_lcl_addr.s_addr; p.iph.version = 4; p.iph.ihl = 5; p.iph.protocol = IPPROTO_IPIP; sprintf(p.name, "dvmrp%d", v->vifc_vifi); ifr.ifr_ifru.ifru_data = (__force void __user *)&p; if (ops->ndo_do_ioctl) { mm_segment_t oldfs = get_fs(); set_fs(KERNEL_DS); err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL); set_fs(oldfs); } else { err = -EOPNOTSUPP; } dev = NULL; if (err == 0 && (dev = __dev_get_by_name(net, p.name)) != NULL) { dev->flags |= IFF_MULTICAST; in_dev = __in_dev_get_rtnl(dev); if (in_dev == NULL) goto failure; ipv4_devconf_setall(in_dev); IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0; if (dev_open(dev)) goto failure; dev_hold(dev); } } return dev; failure: /* allow the register to be completed before unregistering. */ rtnl_unlock(); rtnl_lock(); unregister_netdevice(dev); return NULL; } #ifdef CONFIG_IP_PIMSM static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); struct mr_table *mrt; struct flowi4 fl4 = { .flowi4_oif = dev->ifindex, .flowi4_iif = skb->skb_iif, .flowi4_mark = skb->mark, }; int err; err = ipmr_fib_lookup(net, &fl4, &mrt); if (err < 0) { kfree_skb(skb); return err; } read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT); read_unlock(&mrt_lock); kfree_skb(skb); return NETDEV_TX_OK; } static const struct net_device_ops reg_vif_netdev_ops = { .ndo_start_xmit = reg_vif_xmit, }; static void reg_vif_setup(struct net_device *dev) { dev->type = ARPHRD_PIMREG; dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; dev->flags = IFF_NOARP; dev->netdev_ops = &reg_vif_netdev_ops, dev->destructor = free_netdev; dev->features |= NETIF_F_NETNS_LOCAL; } static struct net_device 
*ipmr_reg_vif(struct net *net, struct mr_table *mrt) { struct net_device *dev; struct in_device *in_dev; char name[IFNAMSIZ]; if (mrt->id == RT_TABLE_DEFAULT) sprintf(name, "pimreg"); else sprintf(name, "pimreg%u", mrt->id); dev = alloc_netdev(0, name, reg_vif_setup); if (dev == NULL) return NULL; dev_net_set(dev, net); if (register_netdevice(dev)) { free_netdev(dev); return NULL; } dev->iflink = 0; rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (!in_dev) { rcu_read_unlock(); goto failure; } ipv4_devconf_setall(in_dev); IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0; rcu_read_unlock(); if (dev_open(dev)) goto failure; dev_hold(dev); return dev; failure: /* allow the register to be completed before unregistering. */ rtnl_unlock(); rtnl_lock(); unregister_netdevice(dev); return NULL; } #endif /* * Delete a VIF entry * @notify: Set to 1, if the caller is a notifier_call */ static int vif_delete(struct mr_table *mrt, int vifi, int notify, struct list_head *head) { struct vif_device *v; struct net_device *dev; struct in_device *in_dev; if (vifi < 0 || vifi >= mrt->maxvif) return -EADDRNOTAVAIL; v = &mrt->vif_table[vifi]; write_lock_bh(&mrt_lock); dev = v->dev; v->dev = NULL; if (!dev) { write_unlock_bh(&mrt_lock); return -EADDRNOTAVAIL; } #ifdef CONFIG_IP_PIMSM if (vifi == mrt->mroute_reg_vif_num) mrt->mroute_reg_vif_num = -1; #endif if (vifi + 1 == mrt->maxvif) { int tmp; for (tmp = vifi - 1; tmp >= 0; tmp--) { if (VIF_EXISTS(mrt, tmp)) break; } mrt->maxvif = tmp+1; } write_unlock_bh(&mrt_lock); dev_set_allmulti(dev, -1); in_dev = __in_dev_get_rtnl(dev); if (in_dev) { IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--; ip_rt_multicast_event(in_dev); } if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify) unregister_netdevice_queue(dev, head); dev_put(dev); return 0; } static void ipmr_cache_free_rcu(struct rcu_head *head) { struct mfc_cache *c = container_of(head, struct mfc_cache, rcu); kmem_cache_free(mrt_cachep, c); } static inline void ipmr_cache_free(struct mfc_cache 
*c) { call_rcu(&c->rcu, ipmr_cache_free_rcu); } /* Destroy an unresolved cache entry, killing queued skbs * and reporting error to netlink readers. */ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) { struct net *net = read_pnet(&mrt->net); struct sk_buff *skb; struct nlmsgerr *e; atomic_dec(&mrt->cache_resolve_queue_len); while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); nlh->nlmsg_type = NLMSG_ERROR; nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); skb_trim(skb, nlh->nlmsg_len); e = NLMSG_DATA(nlh); e->error = -ETIMEDOUT; memset(&e->msg, 0, sizeof(e->msg)); rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else { kfree_skb(skb); } } ipmr_cache_free(c); } /* Timer process for the unresolved queue. */ static void ipmr_expire_process(unsigned long arg) { struct mr_table *mrt = (struct mr_table *)arg; unsigned long now; unsigned long expires; struct mfc_cache *c, *next; if (!spin_trylock(&mfc_unres_lock)) { mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10); return; } if (list_empty(&mrt->mfc_unres_queue)) goto out; now = jiffies; expires = 10*HZ; list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { if (time_after(c->mfc_un.unres.expires, now)) { unsigned long interval = c->mfc_un.unres.expires - now; if (interval < expires) expires = interval; continue; } list_del(&c->list); ipmr_destroy_unres(mrt, c); } if (!list_empty(&mrt->mfc_unres_queue)) mod_timer(&mrt->ipmr_expire_timer, jiffies + expires); out: spin_unlock(&mfc_unres_lock); } /* Fill oifs list. It is called under write locked mrt_lock. 
*/ static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, unsigned char *ttls) { int vifi; cache->mfc_un.res.minvif = MAXVIFS; cache->mfc_un.res.maxvif = 0; memset(cache->mfc_un.res.ttls, 255, MAXVIFS); for (vifi = 0; vifi < mrt->maxvif; vifi++) { if (VIF_EXISTS(mrt, vifi) && ttls[vifi] && ttls[vifi] < 255) { cache->mfc_un.res.ttls[vifi] = ttls[vifi]; if (cache->mfc_un.res.minvif > vifi) cache->mfc_un.res.minvif = vifi; if (cache->mfc_un.res.maxvif <= vifi) cache->mfc_un.res.maxvif = vifi + 1; } } } static int vif_add(struct net *net, struct mr_table *mrt, struct vifctl *vifc, int mrtsock) { int vifi = vifc->vifc_vifi; struct vif_device *v = &mrt->vif_table[vifi]; struct net_device *dev; struct in_device *in_dev; int err; /* Is vif busy ? */ if (VIF_EXISTS(mrt, vifi)) return -EADDRINUSE; switch (vifc->vifc_flags) { #ifdef CONFIG_IP_PIMSM case VIFF_REGISTER: /* * Special Purpose VIF in PIM * All the packets will be sent to the daemon */ if (mrt->mroute_reg_vif_num >= 0) return -EADDRINUSE; dev = ipmr_reg_vif(net, mrt); if (!dev) return -ENOBUFS; err = dev_set_allmulti(dev, 1); if (err) { unregister_netdevice(dev); dev_put(dev); return err; } break; #endif case VIFF_TUNNEL: dev = ipmr_new_tunnel(net, vifc); if (!dev) return -ENOBUFS; err = dev_set_allmulti(dev, 1); if (err) { ipmr_del_tunnel(dev, vifc); dev_put(dev); return err; } break; case VIFF_USE_IFINDEX: case 0: if (vifc->vifc_flags == VIFF_USE_IFINDEX) { dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex); if (dev && __in_dev_get_rtnl(dev) == NULL) { dev_put(dev); return -EADDRNOTAVAIL; } } else { dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr); } if (!dev) return -EADDRNOTAVAIL; err = dev_set_allmulti(dev, 1); if (err) { dev_put(dev); return err; } break; default: return -EINVAL; } in_dev = __in_dev_get_rtnl(dev); if (!in_dev) { dev_put(dev); return -EADDRNOTAVAIL; } IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; ip_rt_multicast_event(in_dev); /* Fill in the VIF structures */ 
v->rate_limit = vifc->vifc_rate_limit; v->local = vifc->vifc_lcl_addr.s_addr; v->remote = vifc->vifc_rmt_addr.s_addr; v->flags = vifc->vifc_flags; if (!mrtsock) v->flags |= VIFF_STATIC; v->threshold = vifc->vifc_threshold; v->bytes_in = 0; v->bytes_out = 0; v->pkt_in = 0; v->pkt_out = 0; v->link = dev->ifindex; if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER)) v->link = dev->iflink; /* And finish update writing critical data */ write_lock_bh(&mrt_lock); v->dev = dev; #ifdef CONFIG_IP_PIMSM if (v->flags & VIFF_REGISTER) mrt->mroute_reg_vif_num = vifi; #endif if (vifi+1 > mrt->maxvif) mrt->maxvif = vifi+1; write_unlock_bh(&mrt_lock); return 0; } /* called with rcu_read_lock() */ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin, __be32 mcastgrp) { int line = MFC_HASH(mcastgrp, origin); struct mfc_cache *c; list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp) return c; } return NULL; } /* * Allocate a multicast cache entry */ static struct mfc_cache *ipmr_cache_alloc(void) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); if (c) c->mfc_un.res.minvif = MAXVIFS; return c; } static struct mfc_cache *ipmr_cache_alloc_unres(void) { struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); if (c) { skb_queue_head_init(&c->mfc_un.unres.unresolved); c->mfc_un.unres.expires = jiffies + 10*HZ; } return c; } /* * A cache entry has gone into a resolved state from queued */ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt, struct mfc_cache *uc, struct mfc_cache *c) { struct sk_buff *skb; struct nlmsgerr *e; /* Play the pending entries through our router */ while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; } else { 
nlh->nlmsg_type = NLMSG_ERROR; nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); skb_trim(skb, nlh->nlmsg_len); e = NLMSG_DATA(nlh); e->error = -EMSGSIZE; memset(&e->msg, 0, sizeof(e->msg)); } rtnl_unicast(skb, net, NETLINK_CB(skb).pid); } else { ip_mr_forward(net, mrt, skb, c, 0); } } } /* * Bounce a cache query up to mrouted. We could use netlink for this but mrouted * expects the following bizarre scheme. * * Called under mrt_lock. */ static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert) { struct sk_buff *skb; const int ihl = ip_hdrlen(pkt); struct igmphdr *igmp; struct igmpmsg *msg; struct sock *mroute_sk; int ret; #ifdef CONFIG_IP_PIMSM if (assert == IGMPMSG_WHOLEPKT) skb = skb_realloc_headroom(pkt, sizeof(struct iphdr)); else #endif skb = alloc_skb(128, GFP_ATOMIC); if (!skb) return -ENOBUFS; #ifdef CONFIG_IP_PIMSM if (assert == IGMPMSG_WHOLEPKT) { /* Ugly, but we have no choice with this interface. * Duplicate old header, fix ihl, length etc. 
* And all this only to mangle msg->im_msgtype and * to set msg->im_mbz to "mbz" :-) */ skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); skb_reset_transport_header(skb); msg = (struct igmpmsg *)skb_network_header(skb); memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); msg->im_msgtype = IGMPMSG_WHOLEPKT; msg->im_mbz = 0; msg->im_vif = mrt->mroute_reg_vif_num; ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2; ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) + sizeof(struct iphdr)); } else #endif { /* Copy the IP header */ skb->network_header = skb->tail; skb_put(skb, ihl); skb_copy_to_linear_data(skb, pkt->data, ihl); ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ msg = (struct igmpmsg *)skb_network_header(skb); msg->im_vif = vifi; skb_dst_set(skb, dst_clone(skb_dst(pkt))); /* Add our header */ igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); igmp->type = msg->im_msgtype = assert; igmp->code = 0; ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */ skb->transport_header = skb->network_header; } rcu_read_lock(); mroute_sk = rcu_dereference(mrt->mroute_sk); if (mroute_sk == NULL) { rcu_read_unlock(); kfree_skb(skb); return -EINVAL; } /* Deliver to mrouted */ ret = sock_queue_rcv_skb(mroute_sk, skb); rcu_read_unlock(); if (ret < 0) { if (net_ratelimit()) pr_warn("mroute: pending queue full, dropping entries\n"); kfree_skb(skb); } return ret; } /* * Queue a packet for resolution. It gets locked cache entry! 
*/ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb) { bool found = false; int err; struct mfc_cache *c; const struct iphdr *iph = ip_hdr(skb); spin_lock_bh(&mfc_unres_lock); list_for_each_entry(c, &mrt->mfc_unres_queue, list) { if (c->mfc_mcastgrp == iph->daddr && c->mfc_origin == iph->saddr) { found = true; break; } } if (!found) { /* Create a new entry if allowable */ if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 || (c = ipmr_cache_alloc_unres()) == NULL) { spin_unlock_bh(&mfc_unres_lock); kfree_skb(skb); return -ENOBUFS; } /* Fill in the new cache entry */ c->mfc_parent = -1; c->mfc_origin = iph->saddr; c->mfc_mcastgrp = iph->daddr; /* Reflect first query at mrouted. */ err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE); if (err < 0) { /* If the report failed throw the cache entry out - Brad Parker */ spin_unlock_bh(&mfc_unres_lock); ipmr_cache_free(c); kfree_skb(skb); return err; } atomic_inc(&mrt->cache_resolve_queue_len); list_add(&c->list, &mrt->mfc_unres_queue); if (atomic_read(&mrt->cache_resolve_queue_len) == 1) mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires); } /* See if we can append the packet */ if (c->mfc_un.unres.unresolved.qlen > 3) { kfree_skb(skb); err = -ENOBUFS; } else { skb_queue_tail(&c->mfc_un.unres.unresolved, skb); err = 0; } spin_unlock_bh(&mfc_unres_lock); return err; } /* * MFC cache manipulation by user space mroute daemon */ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc) { int line; struct mfc_cache *c, *next; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { list_del_rcu(&c->list); ipmr_cache_free(c); return 0; } } return -ENOENT; } static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, struct mfcctl *mfc, int mrtsock) { bool found = false; int line; struct 
mfc_cache *uc, *c; if (mfc->mfcc_parent >= MAXVIFS) return -ENFILE; line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr); list_for_each_entry(c, &mrt->mfc_cache_array[line], list) { if (c->mfc_origin == mfc->mfcc_origin.s_addr && c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) { found = true; break; } } if (found) { write_lock_bh(&mrt_lock); c->mfc_parent = mfc->mfcc_parent; ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; write_unlock_bh(&mrt_lock); return 0; } if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr)) return -EINVAL; c = ipmr_cache_alloc(); if (c == NULL) return -ENOMEM; c->mfc_origin = mfc->mfcc_origin.s_addr; c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr; c->mfc_parent = mfc->mfcc_parent; ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls); if (!mrtsock) c->mfc_flags |= MFC_STATIC; list_add_rcu(&c->list, &mrt->mfc_cache_array[line]); /* * Check to see if we resolved a queued list. If so we * need to send on the frames and tidy up. 
*/ found = false; spin_lock_bh(&mfc_unres_lock); list_for_each_entry(uc, &mrt->mfc_unres_queue, list) { if (uc->mfc_origin == c->mfc_origin && uc->mfc_mcastgrp == c->mfc_mcastgrp) { list_del(&uc->list); atomic_dec(&mrt->cache_resolve_queue_len); found = true; break; } } if (list_empty(&mrt->mfc_unres_queue)) del_timer(&mrt->ipmr_expire_timer); spin_unlock_bh(&mfc_unres_lock); if (found) { ipmr_cache_resolve(net, mrt, uc, c); ipmr_cache_free(uc); } return 0; } /* * Close the multicast socket, and clear the vif tables etc */ static void mroute_clean_tables(struct mr_table *mrt) { int i; LIST_HEAD(list); struct mfc_cache *c, *next; /* Shut down all active vif entries */ for (i = 0; i < mrt->maxvif; i++) { if (!(mrt->vif_table[i].flags & VIFF_STATIC)) vif_delete(mrt, i, 0, &list); } unregister_netdevice_many(&list); /* Wipe the cache */ for (i = 0; i < MFC_LINES; i++) { list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { if (c->mfc_flags & MFC_STATIC) continue; list_del_rcu(&c->list); ipmr_cache_free(c); } } if (atomic_read(&mrt->cache_resolve_queue_len) != 0) { spin_lock_bh(&mfc_unres_lock); list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) { list_del(&c->list); ipmr_destroy_unres(mrt, c); } spin_unlock_bh(&mfc_unres_lock); } } /* called from ip_ra_control(), before an RCU grace period, * we dont need to call synchronize_rcu() here */ static void mrtsock_destruct(struct sock *sk) { struct net *net = sock_net(sk); struct mr_table *mrt; rtnl_lock(); ipmr_for_each_table(mrt, net) { if (sk == rtnl_dereference(mrt->mroute_sk)) { IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; RCU_INIT_POINTER(mrt->mroute_sk, NULL); mroute_clean_tables(mrt); } } rtnl_unlock(); } /* * Socket options and virtual interface manipulation. The whole * virtual interface system is a complete heap, but unfortunately * that's how BSD mrouted happens to think. Maybe one day with a proper * MOSPF/PIM router set up we can clean this up. 
*/ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) { int ret; struct vifctl vif; struct mfcctl mfc; struct net *net = sock_net(sk); struct mr_table *mrt; mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; if (optname != MRT_INIT) { if (sk != rcu_access_pointer(mrt->mroute_sk) && !capable(CAP_NET_ADMIN)) return -EACCES; } switch (optname) { case MRT_INIT: if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_IGMP) return -EOPNOTSUPP; if (optlen != sizeof(int)) return -ENOPROTOOPT; rtnl_lock(); if (rtnl_dereference(mrt->mroute_sk)) { rtnl_unlock(); return -EADDRINUSE; } ret = ip_ra_control(sk, 1, mrtsock_destruct); if (ret == 0) { rcu_assign_pointer(mrt->mroute_sk, sk); IPV4_DEVCONF_ALL(net, MC_FORWARDING)++; } rtnl_unlock(); return ret; case MRT_DONE: if (sk != rcu_access_pointer(mrt->mroute_sk)) return -EACCES; return ip_ra_control(sk, 0, NULL); case MRT_ADD_VIF: case MRT_DEL_VIF: if (optlen != sizeof(vif)) return -EINVAL; if (copy_from_user(&vif, optval, sizeof(vif))) return -EFAULT; if (vif.vifc_vifi >= MAXVIFS) return -ENFILE; rtnl_lock(); if (optname == MRT_ADD_VIF) { ret = vif_add(net, mrt, &vif, sk == rtnl_dereference(mrt->mroute_sk)); } else { ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL); } rtnl_unlock(); return ret; /* * Manipulate the forwarding caches. These live * in a sort of kernel/user symbiosis. */ case MRT_ADD_MFC: case MRT_DEL_MFC: if (optlen != sizeof(mfc)) return -EINVAL; if (copy_from_user(&mfc, optval, sizeof(mfc))) return -EFAULT; rtnl_lock(); if (optname == MRT_DEL_MFC) ret = ipmr_mfc_delete(mrt, &mfc); else ret = ipmr_mfc_add(net, mrt, &mfc, sk == rtnl_dereference(mrt->mroute_sk)); rtnl_unlock(); return ret; /* * Control PIM assert. */ case MRT_ASSERT: { int v; if (get_user(v, (int __user *)optval)) return -EFAULT; mrt->mroute_do_assert = (v) ? 
1 : 0; return 0; } #ifdef CONFIG_IP_PIMSM case MRT_PIM: { int v; if (get_user(v, (int __user *)optval)) return -EFAULT; v = (v) ? 1 : 0; rtnl_lock(); ret = 0; if (v != mrt->mroute_do_pim) { mrt->mroute_do_pim = v; mrt->mroute_do_assert = v; } rtnl_unlock(); return ret; } #endif #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES case MRT_TABLE: { u32 v; if (optlen != sizeof(u32)) return -EINVAL; if (get_user(v, (u32 __user *)optval)) return -EFAULT; rtnl_lock(); ret = 0; if (sk == rtnl_dereference(mrt->mroute_sk)) { ret = -EBUSY; } else { if (!ipmr_new_table(net, v)) ret = -ENOMEM; raw_sk(sk)->ipmr_table = v; } rtnl_unlock(); return ret; } #endif /* * Spurious command, or MRT_VERSION which you cannot * set. */ default: return -ENOPROTOOPT; } } /* * Getsock opt support for the multicast routing system. */ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) { int olr; int val; struct net *net = sock_net(sk); struct mr_table *mrt; mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; if (optname != MRT_VERSION && #ifdef CONFIG_IP_PIMSM optname != MRT_PIM && #endif optname != MRT_ASSERT) return -ENOPROTOOPT; if (get_user(olr, optlen)) return -EFAULT; olr = min_t(unsigned int, olr, sizeof(int)); if (olr < 0) return -EINVAL; if (put_user(olr, optlen)) return -EFAULT; if (optname == MRT_VERSION) val = 0x0305; #ifdef CONFIG_IP_PIMSM else if (optname == MRT_PIM) val = mrt->mroute_do_pim; #endif else val = mrt->mroute_do_assert; if (copy_to_user(optval, &val, olr)) return -EFAULT; return 0; } /* * The IP multicast ioctl support routines. */ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) { struct sioc_sg_req sr; struct sioc_vif_req vr; struct vif_device *vif; struct mfc_cache *c; struct net *net = sock_net(sk); struct mr_table *mrt; mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; switch (cmd) { case SIOCGETVIFCNT: if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; if (vr.vifi >= mrt->maxvif) return -EINVAL; read_lock(&mrt_lock); vif = &mrt->vif_table[vr.vifi]; if (VIF_EXISTS(mrt, vr.vifi)) { vr.icount = vif->pkt_in; vr.ocount = vif->pkt_out; vr.ibytes = vif->bytes_in; vr.obytes = vif->bytes_out; read_unlock(&mrt_lock); if (copy_to_user(arg, &vr, sizeof(vr))) return -EFAULT; return 0; } read_unlock(&mrt_lock); return -EADDRNOTAVAIL; case SIOCGETSGCNT: if (copy_from_user(&sr, arg, sizeof(sr))) return -EFAULT; rcu_read_lock(); c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); if (c) { sr.pktcnt = c->mfc_un.res.pkt; sr.bytecnt = c->mfc_un.res.bytes; sr.wrong_if = c->mfc_un.res.wrong_if; rcu_read_unlock(); if (copy_to_user(arg, &sr, sizeof(sr))) return -EFAULT; return 0; } rcu_read_unlock(); return -EADDRNOTAVAIL; default: return -ENOIOCTLCMD; } } #ifdef CONFIG_COMPAT struct compat_sioc_sg_req { struct in_addr src; struct in_addr grp; compat_ulong_t pktcnt; compat_ulong_t bytecnt; compat_ulong_t wrong_if; }; struct compat_sioc_vif_req { vifi_t vifi; /* Which iface */ compat_ulong_t icount; compat_ulong_t ocount; compat_ulong_t ibytes; compat_ulong_t obytes; }; int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) { struct compat_sioc_sg_req sr; struct compat_sioc_vif_req vr; struct vif_device *vif; struct mfc_cache *c; struct net *net = sock_net(sk); struct mr_table *mrt; mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; switch (cmd) { case SIOCGETVIFCNT: if (copy_from_user(&vr, arg, sizeof(vr))) return -EFAULT; if (vr.vifi >= mrt->maxvif) return -EINVAL; read_lock(&mrt_lock); vif = &mrt->vif_table[vr.vifi]; if (VIF_EXISTS(mrt, vr.vifi)) { vr.icount = vif->pkt_in; vr.ocount = vif->pkt_out; vr.ibytes = vif->bytes_in; vr.obytes = vif->bytes_out; read_unlock(&mrt_lock); if (copy_to_user(arg, &vr, sizeof(vr))) return -EFAULT; return 0; } read_unlock(&mrt_lock); return -EADDRNOTAVAIL; case SIOCGETSGCNT: if (copy_from_user(&sr, arg, sizeof(sr))) return -EFAULT; rcu_read_lock(); c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); if (c) { sr.pktcnt = c->mfc_un.res.pkt; sr.bytecnt = c->mfc_un.res.bytes; sr.wrong_if = c->mfc_un.res.wrong_if; rcu_read_unlock(); if (copy_to_user(arg, &sr, sizeof(sr))) return -EFAULT; return 0; } rcu_read_unlock(); return -EADDRNOTAVAIL; default: return -ENOIOCTLCMD; } } #endif static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct net *net = dev_net(dev); struct mr_table *mrt; struct vif_device *v; int ct; if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; ipmr_for_each_table(mrt, net) { v = &mrt->vif_table[0]; for (ct = 0; ct < mrt->maxvif; ct++, v++) { if (v->dev == dev) vif_delete(mrt, ct, 1, NULL); } } return NOTIFY_DONE; } static struct notifier_block ip_mr_notifier = { .notifier_call = ipmr_device_event, }; /* * Encapsulate a packet by attaching a valid IPIP header to it. * This avoids tunnel drivers and other mess and gives us the speed so * important for multicast video. 
*/ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) { struct iphdr *iph; const struct iphdr *old_iph = ip_hdr(skb); skb_push(skb, sizeof(struct iphdr)); skb->transport_header = skb->network_header; skb_reset_network_header(skb); iph = ip_hdr(skb); iph->version = 4; iph->tos = old_iph->tos; iph->ttl = old_iph->ttl; iph->frag_off = 0; iph->daddr = daddr; iph->saddr = saddr; iph->protocol = IPPROTO_IPIP; iph->ihl = 5; iph->tot_len = htons(skb->len); ip_select_ident(iph, skb_dst(skb), NULL); ip_send_check(iph); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); nf_reset(skb); } static inline int ipmr_forward_finish(struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); if (unlikely(opt->optlen)) ip_forward_options(skb); return dst_output(skb); } /* * Processing handlers for ipmr_forward */ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, int vifi) { const struct iphdr *iph = ip_hdr(skb); struct vif_device *vif = &mrt->vif_table[vifi]; struct net_device *dev; struct rtable *rt; struct flowi4 fl4; int encap = 0; if (vif->dev == NULL) goto out_free; #ifdef CONFIG_IP_PIMSM if (vif->flags & VIFF_REGISTER) { vif->pkt_out++; vif->bytes_out += skb->len; vif->dev->stats.tx_bytes += skb->len; vif->dev->stats.tx_packets++; ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT); goto out_free; } #endif if (vif->flags & VIFF_TUNNEL) { rt = ip_route_output_ports(net, &fl4, NULL, vif->remote, vif->local, 0, 0, IPPROTO_IPIP, RT_TOS(iph->tos), vif->link); if (IS_ERR(rt)) goto out_free; encap = sizeof(struct iphdr); } else { rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0, 0, 0, IPPROTO_IPIP, RT_TOS(iph->tos), vif->link); if (IS_ERR(rt)) goto out_free; } dev = rt->dst.dev; if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) { /* Do not fragment multicasts. 
Alas, IPv4 does not * allow to send ICMP, so that packets will disappear * to blackhole. */ IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS); ip_rt_put(rt); goto out_free; } encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len; if (skb_cow(skb, encap)) { ip_rt_put(rt); goto out_free; } vif->pkt_out++; vif->bytes_out += skb->len; skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); ip_decrease_ttl(ip_hdr(skb)); /* FIXME: forward and output firewalls used to be called here. * What do we do with netfilter? -- RR */ if (vif->flags & VIFF_TUNNEL) { ip_encap(skb, vif->local, vif->remote); /* FIXME: extra output firewall step used to be here. --RR */ vif->dev->stats.tx_packets++; vif->dev->stats.tx_bytes += skb->len; } IPCB(skb)->flags |= IPSKB_FORWARDED; /* * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally * not only before forwarding, but after forwarding on all output * interfaces. It is clear, if mrouter runs a multicasting * program, it should receive packets not depending to what interface * program is joined. * If we will not make it, the program will have to join on all * interfaces. On the other hand, multihoming host (or router, but * not mrouter) cannot join to more than one interface - it will * result in receiving multiple packets. */ NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev, ipmr_forward_finish); return; out_free: kfree_skb(skb); } static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev) { int ct; for (ct = mrt->maxvif-1; ct >= 0; ct--) { if (mrt->vif_table[ct].dev == dev) break; } return ct; } /* "local" means that we should preserve one skb (for local delivery) */ static int ip_mr_forward(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *cache, int local) { int psend = -1; int vif, ct; vif = cache->mfc_parent; cache->mfc_un.res.pkt++; cache->mfc_un.res.bytes += skb->len; /* * Wrong interface: drop packet and (maybe) send PIM assert. 
*/ if (mrt->vif_table[vif].dev != skb->dev) { int true_vifi; if (rt_is_output_route(skb_rtable(skb))) { /* It is our own packet, looped back. * Very complicated situation... * * The best workaround until routing daemons will be * fixed is not to redistribute packet, if it was * send through wrong interface. It means, that * multicast applications WILL NOT work for * (S,G), which have default multicast route pointing * to wrong oif. In any case, it is not a good * idea to use multicasting applications on router. */ goto dont_forward; } cache->mfc_un.res.wrong_if++; true_vifi = ipmr_find_vif(mrt, skb->dev); if (true_vifi >= 0 && mrt->mroute_do_assert && /* pimsm uses asserts, when switching from RPT to SPT, * so that we cannot check that packet arrived on an oif. * It is bad, but otherwise we would need to move pretty * large chunk of pimd to kernel. Ough... --ANK */ (mrt->mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) && time_after(jiffies, cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) { cache->mfc_un.res.last_assert = jiffies; ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF); } goto dont_forward; } mrt->vif_table[vif].pkt_in++; mrt->vif_table[vif].bytes_in += skb->len; /* * Forward the frame */ for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) { if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) { if (psend != -1) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) ipmr_queue_xmit(net, mrt, skb2, cache, psend); } psend = ct; } } if (psend != -1) { if (local) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) ipmr_queue_xmit(net, mrt, skb2, cache, psend); } else { ipmr_queue_xmit(net, mrt, skb, cache, psend); return 0; } } dont_forward: if (!local) kfree_skb(skb); return 0; } static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb) { struct rtable *rt = skb_rtable(skb); struct iphdr *iph = ip_hdr(skb); struct flowi4 fl4 = { .daddr = iph->daddr, .saddr = iph->saddr, 
.flowi4_tos = RT_TOS(iph->tos), .flowi4_oif = rt->rt_oif, .flowi4_iif = rt->rt_iif, .flowi4_mark = rt->rt_mark, }; struct mr_table *mrt; int err; err = ipmr_fib_lookup(net, &fl4, &mrt); if (err) return ERR_PTR(err); return mrt; } /* * Multicast packets for forwarding arrive here * Called with rcu_read_lock(); */ int ip_mr_input(struct sk_buff *skb) { struct mfc_cache *cache; struct net *net = dev_net(skb->dev); int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; struct mr_table *mrt; /* Packet is looped back after forward, it should not be * forwarded second time, but still can be delivered locally. */ if (IPCB(skb)->flags & IPSKB_FORWARDED) goto dont_forward; mrt = ipmr_rt_fib_lookup(net, skb); if (IS_ERR(mrt)) { kfree_skb(skb); return PTR_ERR(mrt); } if (!local) { if (IPCB(skb)->opt.router_alert) { if (ip_call_ra_chain(skb)) return 0; } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) { /* IGMPv1 (and broken IGMPv2 implementations sort of * Cisco IOS <= 11.2(8)) do not put router alert * option to IGMP packets destined to routable * groups. It is very bad, because it means * that we can forward NO IGMP messages. 
*/ struct sock *mroute_sk; mroute_sk = rcu_dereference(mrt->mroute_sk); if (mroute_sk) { nf_reset(skb); raw_rcv(mroute_sk, skb); return 0; } } } /* already under rcu_read_lock() */ cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); /* * No usable cache entry */ if (cache == NULL) { int vif; if (local) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); ip_local_deliver(skb); if (skb2 == NULL) return -ENOBUFS; skb = skb2; } read_lock(&mrt_lock); vif = ipmr_find_vif(mrt, skb->dev); if (vif >= 0) { int err2 = ipmr_cache_unresolved(mrt, vif, skb); read_unlock(&mrt_lock); return err2; } read_unlock(&mrt_lock); kfree_skb(skb); return -ENODEV; } read_lock(&mrt_lock); ip_mr_forward(net, mrt, skb, cache, local); read_unlock(&mrt_lock); if (local) return ip_local_deliver(skb); return 0; dont_forward: if (local) return ip_local_deliver(skb); kfree_skb(skb); return 0; } #ifdef CONFIG_IP_PIMSM /* called with rcu_read_lock() */ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb, unsigned int pimlen) { struct net_device *reg_dev = NULL; struct iphdr *encap; encap = (struct iphdr *)(skb_transport_header(skb) + pimlen); /* * Check that: * a. packet is really sent to a multicast group * b. packet is not a NULL-REGISTER * c. 
packet is not truncated */ if (!ipv4_is_multicast(encap->daddr) || encap->tot_len == 0 || ntohs(encap->tot_len) + pimlen > skb->len) return 1; read_lock(&mrt_lock); if (mrt->mroute_reg_vif_num >= 0) reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev; read_unlock(&mrt_lock); if (reg_dev == NULL) return 1; skb->mac_header = skb->network_header; skb_pull(skb, (u8 *)encap - skb->data); skb_reset_network_header(skb); skb->protocol = htons(ETH_P_IP); skb->ip_summed = CHECKSUM_NONE; skb->pkt_type = PACKET_HOST; skb_tunnel_rx(skb, reg_dev); netif_rx(skb); return NET_RX_SUCCESS; } #endif #ifdef CONFIG_IP_PIMSM_V1 /* * Handle IGMP messages of PIMv1 */ int pim_rcv_v1(struct sk_buff *skb) { struct igmphdr *pim; struct net *net = dev_net(skb->dev); struct mr_table *mrt; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) goto drop; pim = igmp_hdr(skb); mrt = ipmr_rt_fib_lookup(net, skb); if (IS_ERR(mrt)) goto drop; if (!mrt->mroute_do_pim || pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER) goto drop; if (__pim_rcv(mrt, skb, sizeof(*pim))) { drop: kfree_skb(skb); } return 0; } #endif #ifdef CONFIG_IP_PIMSM_V2 static int pim_rcv(struct sk_buff *skb) { struct pimreghdr *pim; struct net *net = dev_net(skb->dev); struct mr_table *mrt; if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr))) goto drop; pim = (struct pimreghdr *)skb_transport_header(skb); if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) || (pim->flags & PIM_NULL_REGISTER) || (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && csum_fold(skb_checksum(skb, 0, skb->len, 0)))) goto drop; mrt = ipmr_rt_fib_lookup(net, skb); if (IS_ERR(mrt)) goto drop; if (__pim_rcv(mrt, skb, sizeof(*pim))) { drop: kfree_skb(skb); } return 0; } #endif static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) { int ct; struct rtnexthop *nhp; u8 *b = skb_tail_pointer(skb); struct rtattr *mp_head; /* If cache is unresolved, don't try to parse IIF and OIF 
*/ if (c->mfc_parent >= MAXVIFS) return -ENOENT; if (VIF_EXISTS(mrt, c->mfc_parent)) RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex); mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) { if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) goto rtattr_failure; nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); nhp->rtnh_flags = 0; nhp->rtnh_hops = c->mfc_un.res.ttls[ct]; nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex; nhp->rtnh_len = sizeof(*nhp); } } mp_head->rta_type = RTA_MULTIPATH; mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; rtm->rtm_type = RTN_MULTICAST; return 1; rtattr_failure: nlmsg_trim(skb, b); return -EMSGSIZE; } int ipmr_get_route(struct net *net, struct sk_buff *skb, __be32 saddr, __be32 daddr, struct rtmsg *rtm, int nowait) { struct mfc_cache *cache; struct mr_table *mrt; int err; mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); if (mrt == NULL) return -ENOENT; rcu_read_lock(); cache = ipmr_cache_find(mrt, saddr, daddr); if (cache == NULL) { struct sk_buff *skb2; struct iphdr *iph; struct net_device *dev; int vif = -1; if (nowait) { rcu_read_unlock(); return -EAGAIN; } dev = skb->dev; read_lock(&mrt_lock); if (dev) vif = ipmr_find_vif(mrt, dev); if (vif < 0) { read_unlock(&mrt_lock); rcu_read_unlock(); return -ENODEV; } skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) { read_unlock(&mrt_lock); rcu_read_unlock(); return -ENOMEM; } skb_push(skb2, sizeof(struct iphdr)); skb_reset_network_header(skb2); iph = ip_hdr(skb2); iph->ihl = sizeof(struct iphdr) >> 2; iph->saddr = saddr; iph->daddr = daddr; iph->version = 0; err = ipmr_cache_unresolved(mrt, vif, skb2); read_unlock(&mrt_lock); rcu_read_unlock(); return err; } read_lock(&mrt_lock); if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; err = __ipmr_fill_mroute(mrt, skb, cache, rtm); 
read_unlock(&mrt_lock); rcu_read_unlock(); return err; } static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, u32 pid, u32 seq, struct mfc_cache *c) { struct nlmsghdr *nlh; struct rtmsg *rtm; nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI); if (nlh == NULL) return -EMSGSIZE; rtm = nlmsg_data(nlh); rtm->rtm_family = RTNL_FAMILY_IPMR; rtm->rtm_dst_len = 32; rtm->rtm_src_len = 32; rtm->rtm_tos = 0; rtm->rtm_table = mrt->id; NLA_PUT_U32(skb, RTA_TABLE, mrt->id); rtm->rtm_type = RTN_MULTICAST; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = RTPROT_UNSPEC; rtm->rtm_flags = 0; NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin); NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp); if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) goto nla_put_failure; return nlmsg_end(skb, nlh); nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct mr_table *mrt; struct mfc_cache *mfc; unsigned int t = 0, s_t; unsigned int h = 0, s_h; unsigned int e = 0, s_e; s_t = cb->args[0]; s_h = cb->args[1]; s_e = cb->args[2]; rcu_read_lock(); ipmr_for_each_table(mrt, net) { if (t < s_t) goto next_table; if (t > s_t) s_h = 0; for (h = s_h; h < MFC_LINES; h++) { list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) { if (e < s_e) goto next_entry; if (ipmr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, mfc) < 0) goto done; next_entry: e++; } e = s_e = 0; } s_h = 0; next_table: t++; } done: rcu_read_unlock(); cb->args[2] = e; cb->args[1] = h; cb->args[0] = t; return skb->len; } #ifdef CONFIG_PROC_FS /* * The /proc interfaces to multicast routing : * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif */ struct ipmr_vif_iter { struct seq_net_private p; struct mr_table *mrt; int ct; }; static struct vif_device *ipmr_vif_seq_idx(struct net *net, struct ipmr_vif_iter *iter, loff_t pos) { struct mr_table *mrt = iter->mrt; for 
(iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { if (!VIF_EXISTS(mrt, iter->ct)) continue; if (pos-- == 0) return &mrt->vif_table[iter->ct]; } return NULL; } static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) __acquires(mrt_lock) { struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); struct mr_table *mrt; mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); if (mrt == NULL) return ERR_PTR(-ENOENT); iter->mrt = mrt; read_lock(&mrt_lock); return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ipmr_vif_iter *iter = seq->private; struct net *net = seq_file_net(seq); struct mr_table *mrt = iter->mrt; ++*pos; if (v == SEQ_START_TOKEN) return ipmr_vif_seq_idx(net, iter, 0); while (++iter->ct < mrt->maxvif) { if (!VIF_EXISTS(mrt, iter->ct)) continue; return &mrt->vif_table[iter->ct]; } return NULL; } static void ipmr_vif_seq_stop(struct seq_file *seq, void *v) __releases(mrt_lock) { read_unlock(&mrt_lock); } static int ipmr_vif_seq_show(struct seq_file *seq, void *v) { struct ipmr_vif_iter *iter = seq->private; struct mr_table *mrt = iter->mrt; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n"); } else { const struct vif_device *vif = v; const char *name = vif->dev ? 
vif->dev->name : "none"; seq_printf(seq, "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n", vif - mrt->vif_table, name, vif->bytes_in, vif->pkt_in, vif->bytes_out, vif->pkt_out, vif->flags, vif->local, vif->remote); } return 0; } static const struct seq_operations ipmr_vif_seq_ops = { .start = ipmr_vif_seq_start, .next = ipmr_vif_seq_next, .stop = ipmr_vif_seq_stop, .show = ipmr_vif_seq_show, }; static int ipmr_vif_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &ipmr_vif_seq_ops, sizeof(struct ipmr_vif_iter)); } static const struct file_operations ipmr_vif_fops = { .owner = THIS_MODULE, .open = ipmr_vif_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; struct ipmr_mfc_iter { struct seq_net_private p; struct mr_table *mrt; struct list_head *cache; int ct; }; static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net, struct ipmr_mfc_iter *it, loff_t pos) { struct mr_table *mrt = it->mrt; struct mfc_cache *mfc; rcu_read_lock(); for (it->ct = 0; it->ct < MFC_LINES; it->ct++) { it->cache = &mrt->mfc_cache_array[it->ct]; list_for_each_entry_rcu(mfc, it->cache, list) if (pos-- == 0) return mfc; } rcu_read_unlock(); spin_lock_bh(&mfc_unres_lock); it->cache = &mrt->mfc_unres_queue; list_for_each_entry(mfc, it->cache, list) if (pos-- == 0) return mfc; spin_unlock_bh(&mfc_unres_lock); it->cache = NULL; return NULL; } static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) { struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); struct mr_table *mrt; mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); if (mrt == NULL) return ERR_PTR(-ENOENT); it->mrt = mrt; it->cache = NULL; it->ct = 0; return *pos ? 
ipmr_mfc_seq_idx(net, seq->private, *pos - 1) : SEQ_START_TOKEN; } static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct mfc_cache *mfc = v; struct ipmr_mfc_iter *it = seq->private; struct net *net = seq_file_net(seq); struct mr_table *mrt = it->mrt; ++*pos; if (v == SEQ_START_TOKEN) return ipmr_mfc_seq_idx(net, seq->private, 0); if (mfc->list.next != it->cache) return list_entry(mfc->list.next, struct mfc_cache, list); if (it->cache == &mrt->mfc_unres_queue) goto end_of_list; BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]); while (++it->ct < MFC_LINES) { it->cache = &mrt->mfc_cache_array[it->ct]; if (list_empty(it->cache)) continue; return list_first_entry(it->cache, struct mfc_cache, list); } /* exhausted cache_array, show unresolved */ rcu_read_unlock(); it->cache = &mrt->mfc_unres_queue; it->ct = 0; spin_lock_bh(&mfc_unres_lock); if (!list_empty(it->cache)) return list_first_entry(it->cache, struct mfc_cache, list); end_of_list: spin_unlock_bh(&mfc_unres_lock); it->cache = NULL; return NULL; } static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) { struct ipmr_mfc_iter *it = seq->private; struct mr_table *mrt = it->mrt; if (it->cache == &mrt->mfc_unres_queue) spin_unlock_bh(&mfc_unres_lock); else if (it->cache == &mrt->mfc_cache_array[it->ct]) rcu_read_unlock(); } static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) { int n; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Group Origin Iif Pkts Bytes Wrong Oifs\n"); } else { const struct mfc_cache *mfc = v; const struct ipmr_mfc_iter *it = seq->private; const struct mr_table *mrt = it->mrt; seq_printf(seq, "%08X %08X %-3hd", (__force u32) mfc->mfc_mcastgrp, (__force u32) mfc->mfc_origin, mfc->mfc_parent); if (it->cache != &mrt->mfc_unres_queue) { seq_printf(seq, " %8lu %8lu %8lu", mfc->mfc_un.res.pkt, mfc->mfc_un.res.bytes, mfc->mfc_un.res.wrong_if); for (n = mfc->mfc_un.res.minvif; n < mfc->mfc_un.res.maxvif; n++) { if (VIF_EXISTS(mrt, n) && mfc->mfc_un.res.ttls[n] < 
255) seq_printf(seq, " %2d:%-3d", n, mfc->mfc_un.res.ttls[n]); } } else { /* unresolved mfc_caches don't contain * pkt, bytes and wrong_if values */ seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul); } seq_putc(seq, '\n'); } return 0; } static const struct seq_operations ipmr_mfc_seq_ops = { .start = ipmr_mfc_seq_start, .next = ipmr_mfc_seq_next, .stop = ipmr_mfc_seq_stop, .show = ipmr_mfc_seq_show, }; static int ipmr_mfc_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &ipmr_mfc_seq_ops, sizeof(struct ipmr_mfc_iter)); } static const struct file_operations ipmr_mfc_fops = { .owner = THIS_MODULE, .open = ipmr_mfc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif #ifdef CONFIG_IP_PIMSM_V2 static const struct net_protocol pim_protocol = { .handler = pim_rcv, .netns_ok = 1, }; #endif /* * Setup for IP multicast routing */ static int __net_init ipmr_net_init(struct net *net) { int err; err = ipmr_rules_init(net); if (err < 0) goto fail; #ifdef CONFIG_PROC_FS err = -ENOMEM; if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops)) goto proc_vif_fail; if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops)) goto proc_cache_fail; #endif return 0; #ifdef CONFIG_PROC_FS proc_cache_fail: proc_net_remove(net, "ip_mr_vif"); proc_vif_fail: ipmr_rules_exit(net); #endif fail: return err; } static void __net_exit ipmr_net_exit(struct net *net) { #ifdef CONFIG_PROC_FS proc_net_remove(net, "ip_mr_cache"); proc_net_remove(net, "ip_mr_vif"); #endif ipmr_rules_exit(net); } static struct pernet_operations ipmr_net_ops = { .init = ipmr_net_init, .exit = ipmr_net_exit, }; int __init ip_mr_init(void) { int err; mrt_cachep = kmem_cache_create("ip_mrt_cache", sizeof(struct mfc_cache), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); if (!mrt_cachep) return -ENOMEM; err = register_pernet_subsys(&ipmr_net_ops); if (err) goto reg_pernet_fail; err = register_netdevice_notifier(&ip_mr_notifier); if (err) goto reg_notif_fail; 
#ifdef CONFIG_IP_PIMSM_V2 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) { pr_err("%s: can't add PIM protocol\n", __func__); err = -EAGAIN; goto add_proto_fail; } #endif rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute, NULL); return 0; #ifdef CONFIG_IP_PIMSM_V2 add_proto_fail: unregister_netdevice_notifier(&ip_mr_notifier); #endif reg_notif_fail: unregister_pernet_subsys(&ipmr_net_ops); reg_pernet_fail: kmem_cache_destroy(mrt_cachep); return err; }
gpl-2.0
qhuang00/terasic_MTL
arch/powerpc/platforms/82xx/pq2.c
4103
1947
/*
 * Common PowerQUICC II code.
 *
 * Author: Scott Wood <scottwood@freescale.com>
 * Copyright (c) 2007 Freescale Semiconductor
 *
 * Based on code by Vitaly Bordug <vbordug@ru.mvista.com>
 * pq2_restart fix by Wade Farnsworth <wfarnsworth@mvista.com>
 * Copyright (c) 2006 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <asm/cpm2.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>

#include <platforms/82xx/pq2.h>

/* Reset Mode Register bit: enable reset on checkstop */
#define RMR_CSRE 0x00000001

/*
 * Restart the board by deliberately provoking a checkstop.
 *
 * With RMR[CSRE] set, a machine checkstop triggers a hardware reset.
 * Clearing MSR[ME] while performing an uncached access causes the
 * checkstop; if that somehow fails to reset the board, we panic.
 *
 * @cmd: restart command string from the reboot path (unused here).
 */
void pq2_restart(char *cmd)
{
	local_irq_disable();
	setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);

	/* Clear the ME,EE,IR & DR bits in MSR to cause checkstop */
	mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
	/* The read itself is what triggers the checkstop once ME is off */
	in_8(&cpm2_immr->im_clkrst.res[0]);

	panic("Restart failed\n");
}

#ifdef CONFIG_PCI
/*
 * Config-space access filter: hide bus 0 slot 0 (the PQ2 host bridge
 * itself) from generic PCI probing; everything else is allowed through.
 */
static int pq2_pci_exclude_device(struct pci_controller *hose,
                                  u_char bus, u8 devfn)
{
	if (bus == 0 && PCI_SLOT(devfn) == 0)
		return PCIBIOS_DEVICE_NOT_FOUND;
	else
		return PCIBIOS_SUCCESSFUL;
}

/*
 * Register one PCI host bridge described by device-tree node @np.
 *
 * The node's first "reg" resource must cover at least 0x10c bytes so the
 * indirect config registers at offsets 0x100 (addr) and 0x104 (data) are
 * in range; otherwise we log an error and bail.  NOTE(review): allocation
 * failure of the controller returns silently with no message — presumably
 * intentional (OOM is already loud), but worth confirming.
 */
static void __init pq2_pci_add_bridge(struct device_node *np)
{
	struct pci_controller *hose;
	struct resource r;

	if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b)
		goto err;

	pci_add_flags(PCI_REASSIGN_ALL_BUS);

	hose = pcibios_alloc_controller(np);
	if (!hose)
		return;

	hose->dn = np;

	setup_indirect_pci(hose, r.start + 0x100, r.start + 0x104, 0);
	pci_process_bridge_OF_ranges(hose, np, 1);

	return;

err:
	printk(KERN_ERR "No valid PCI reg property in device tree\n");
}

/*
 * Platform PCI init: install the host-bridge exclusion hook, then add a
 * controller for every "fsl,pq2-pci" compatible node in the device tree.
 */
void __init pq2_init_pci(void)
{
	struct device_node *np;

	ppc_md.pci_exclude_device = pq2_pci_exclude_device;

	for_each_compatible_node(np, NULL, "fsl,pq2-pci")
		pq2_pci_add_bridge(np);
}
#endif
gpl-2.0
boa19861105/BOA_Eye_M6.0_Kernel
arch/arm/mach-imx/clock-imx1.c
5383
14220
/* * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/list.h> #include <linux/math64.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/clkdev.h> #include <mach/clock.h> #include <mach/hardware.h> #include <mach/common.h> #define IO_ADDR_CCM(off) (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off))) /* CCM register addresses */ #define CCM_CSCR IO_ADDR_CCM(0x0) #define CCM_MPCTL0 IO_ADDR_CCM(0x4) #define CCM_SPCTL0 IO_ADDR_CCM(0xc) #define CCM_PCDR IO_ADDR_CCM(0x20) #define CCM_CSCR_CLKO_OFFSET 29 #define CCM_CSCR_CLKO_MASK (0x7 << 29) #define CCM_CSCR_USB_OFFSET 26 #define CCM_CSCR_USB_MASK (0x7 << 26) #define CCM_CSCR_OSC_EN_SHIFT 17 #define CCM_CSCR_SYSTEM_SEL (1 << 16) #define CCM_CSCR_BCLK_OFFSET 10 #define CCM_CSCR_BCLK_MASK (0xf << 10) #define CCM_CSCR_PRESC (1 << 15) #define CCM_PCDR_PCLK3_OFFSET 16 #define CCM_PCDR_PCLK3_MASK (0x7f << 16) #define CCM_PCDR_PCLK2_OFFSET 4 #define CCM_PCDR_PCLK2_MASK (0xf << 4) #define CCM_PCDR_PCLK1_OFFSET 0 #define CCM_PCDR_PCLK1_MASK 0xf #define IO_ADDR_SCM(off) (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off))) /* SCM register addresses */ #define SCM_GCCR IO_ADDR_SCM(0xc) #define SCM_GCCR_DMA_CLK_EN_OFFSET 3 #define SCM_GCCR_CSI_CLK_EN_OFFSET 2 #define SCM_GCCR_MMA_CLK_EN_OFFSET 1 #define 
SCM_GCCR_USBD_CLK_EN_OFFSET 0 static int _clk_enable(struct clk *clk) { unsigned int reg; reg = __raw_readl(clk->enable_reg); reg |= 1 << clk->enable_shift; __raw_writel(reg, clk->enable_reg); return 0; } static void _clk_disable(struct clk *clk) { unsigned int reg; reg = __raw_readl(clk->enable_reg); reg &= ~(1 << clk->enable_shift); __raw_writel(reg, clk->enable_reg); } static int _clk_can_use_parent(const struct clk *clk_arr[], unsigned int size, struct clk *parent) { int i; for (i = 0; i < size; i++) if (parent == clk_arr[i]) return i; return -EINVAL; } static unsigned long _clk_simple_round_rate(struct clk *clk, unsigned long rate, unsigned int limit) { int div; unsigned long parent_rate; parent_rate = clk_get_rate(clk->parent); div = parent_rate / rate; if (parent_rate % rate) div++; if (div > limit) div = limit; return parent_rate / div; } static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate) { return clk->parent->round_rate(clk->parent, rate); } static int _clk_parent_set_rate(struct clk *clk, unsigned long rate) { return clk->parent->set_rate(clk->parent, rate); } static unsigned long clk16m_get_rate(struct clk *clk) { return 16000000; } static struct clk clk16m = { .get_rate = clk16m_get_rate, .enable = _clk_enable, .enable_reg = CCM_CSCR, .enable_shift = CCM_CSCR_OSC_EN_SHIFT, .disable = _clk_disable, }; /* in Hz */ static unsigned long clk32_rate; static unsigned long clk32_get_rate(struct clk *clk) { return clk32_rate; } static struct clk clk32 = { .get_rate = clk32_get_rate, }; static unsigned long clk32_premult_get_rate(struct clk *clk) { return clk_get_rate(clk->parent) * 512; } static struct clk clk32_premult = { .parent = &clk32, .get_rate = clk32_premult_get_rate, }; static const struct clk *prem_clk_clocks[] = { &clk32_premult, &clk16m, }; static int prem_clk_set_parent(struct clk *clk, struct clk *parent) { int i; unsigned int reg = __raw_readl(CCM_CSCR); i = _clk_can_use_parent(prem_clk_clocks, 
ARRAY_SIZE(prem_clk_clocks), parent); switch (i) { case 0: reg &= ~CCM_CSCR_SYSTEM_SEL; break; case 1: reg |= CCM_CSCR_SYSTEM_SEL; break; default: return i; } __raw_writel(reg, CCM_CSCR); return 0; } static struct clk prem_clk = { .set_parent = prem_clk_set_parent, }; static unsigned long system_clk_get_rate(struct clk *clk) { return mxc_decode_pll(__raw_readl(CCM_SPCTL0), clk_get_rate(clk->parent)); } static struct clk system_clk = { .parent = &prem_clk, .get_rate = system_clk_get_rate, }; static unsigned long mcu_clk_get_rate(struct clk *clk) { return mxc_decode_pll(__raw_readl(CCM_MPCTL0), clk_get_rate(clk->parent)); } static struct clk mcu_clk = { .parent = &clk32_premult, .get_rate = mcu_clk_get_rate, }; static unsigned long fclk_get_rate(struct clk *clk) { unsigned long fclk = clk_get_rate(clk->parent); if (__raw_readl(CCM_CSCR) & CCM_CSCR_PRESC) fclk /= 2; return fclk; } static struct clk fclk = { .parent = &mcu_clk, .get_rate = fclk_get_rate, }; /* * get hclk ( SDRAM, CSI, Memory Stick, I2C, DMA ) */ static unsigned long hclk_get_rate(struct clk *clk) { return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) & CCM_CSCR_BCLK_MASK) >> CCM_CSCR_BCLK_OFFSET) + 1); } static unsigned long hclk_round_rate(struct clk *clk, unsigned long rate) { return _clk_simple_round_rate(clk, rate, 16); } static int hclk_set_rate(struct clk *clk, unsigned long rate) { unsigned int div; unsigned int reg; unsigned long parent_rate; parent_rate = clk_get_rate(clk->parent); div = parent_rate / rate; if (div > 16 || div < 1 || ((parent_rate / div) != rate)) return -EINVAL; div--; reg = __raw_readl(CCM_CSCR); reg &= ~CCM_CSCR_BCLK_MASK; reg |= div << CCM_CSCR_BCLK_OFFSET; __raw_writel(reg, CCM_CSCR); return 0; } static struct clk hclk = { .parent = &system_clk, .get_rate = hclk_get_rate, .round_rate = hclk_round_rate, .set_rate = hclk_set_rate, }; static unsigned long clk48m_get_rate(struct clk *clk) { return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) & 
CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET) + 1); } static unsigned long clk48m_round_rate(struct clk *clk, unsigned long rate) { return _clk_simple_round_rate(clk, rate, 8); } static int clk48m_set_rate(struct clk *clk, unsigned long rate) { unsigned int div; unsigned int reg; unsigned long parent_rate; parent_rate = clk_get_rate(clk->parent); div = parent_rate / rate; if (div > 8 || div < 1 || ((parent_rate / div) != rate)) return -EINVAL; div--; reg = __raw_readl(CCM_CSCR); reg &= ~CCM_CSCR_USB_MASK; reg |= div << CCM_CSCR_USB_OFFSET; __raw_writel(reg, CCM_CSCR); return 0; } static struct clk clk48m = { .parent = &system_clk, .get_rate = clk48m_get_rate, .round_rate = clk48m_round_rate, .set_rate = clk48m_set_rate, }; /* * get peripheral clock 1 ( UART[12], Timer[12], PWM ) */ static unsigned long perclk1_get_rate(struct clk *clk) { return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) & CCM_PCDR_PCLK1_MASK) >> CCM_PCDR_PCLK1_OFFSET) + 1); } static unsigned long perclk1_round_rate(struct clk *clk, unsigned long rate) { return _clk_simple_round_rate(clk, rate, 16); } static int perclk1_set_rate(struct clk *clk, unsigned long rate) { unsigned int div; unsigned int reg; unsigned long parent_rate; parent_rate = clk_get_rate(clk->parent); div = parent_rate / rate; if (div > 16 || div < 1 || ((parent_rate / div) != rate)) return -EINVAL; div--; reg = __raw_readl(CCM_PCDR); reg &= ~CCM_PCDR_PCLK1_MASK; reg |= div << CCM_PCDR_PCLK1_OFFSET; __raw_writel(reg, CCM_PCDR); return 0; } /* * get peripheral clock 2 ( LCD, SD, SPI[12] ) */ static unsigned long perclk2_get_rate(struct clk *clk) { return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) & CCM_PCDR_PCLK2_MASK) >> CCM_PCDR_PCLK2_OFFSET) + 1); } static unsigned long perclk2_round_rate(struct clk *clk, unsigned long rate) { return _clk_simple_round_rate(clk, rate, 16); } static int perclk2_set_rate(struct clk *clk, unsigned long rate) { unsigned int div; unsigned int reg; unsigned long parent_rate; parent_rate 
= clk_get_rate(clk->parent); div = parent_rate / rate; if (div > 16 || div < 1 || ((parent_rate / div) != rate)) return -EINVAL; div--; reg = __raw_readl(CCM_PCDR); reg &= ~CCM_PCDR_PCLK2_MASK; reg |= div << CCM_PCDR_PCLK2_OFFSET; __raw_writel(reg, CCM_PCDR); return 0; } /* * get peripheral clock 3 ( SSI ) */ static unsigned long perclk3_get_rate(struct clk *clk) { return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) & CCM_PCDR_PCLK3_MASK) >> CCM_PCDR_PCLK3_OFFSET) + 1); } static unsigned long perclk3_round_rate(struct clk *clk, unsigned long rate) { return _clk_simple_round_rate(clk, rate, 128); } static int perclk3_set_rate(struct clk *clk, unsigned long rate) { unsigned int div; unsigned int reg; unsigned long parent_rate; parent_rate = clk_get_rate(clk->parent); div = parent_rate / rate; if (div > 128 || div < 1 || ((parent_rate / div) != rate)) return -EINVAL; div--; reg = __raw_readl(CCM_PCDR); reg &= ~CCM_PCDR_PCLK3_MASK; reg |= div << CCM_PCDR_PCLK3_OFFSET; __raw_writel(reg, CCM_PCDR); return 0; } static struct clk perclk[] = { { .id = 0, .parent = &system_clk, .get_rate = perclk1_get_rate, .round_rate = perclk1_round_rate, .set_rate = perclk1_set_rate, }, { .id = 1, .parent = &system_clk, .get_rate = perclk2_get_rate, .round_rate = perclk2_round_rate, .set_rate = perclk2_set_rate, }, { .id = 2, .parent = &system_clk, .get_rate = perclk3_get_rate, .round_rate = perclk3_round_rate, .set_rate = perclk3_set_rate, } }; static const struct clk *clko_clocks[] = { &perclk[0], &hclk, &clk48m, &clk16m, &prem_clk, &fclk, }; static int clko_set_parent(struct clk *clk, struct clk *parent) { int i; unsigned int reg; i = _clk_can_use_parent(clko_clocks, ARRAY_SIZE(clko_clocks), parent); if (i < 0) return i; reg = __raw_readl(CCM_CSCR) & ~CCM_CSCR_CLKO_MASK; reg |= i << CCM_CSCR_CLKO_OFFSET; __raw_writel(reg, CCM_CSCR); if (clko_clocks[i]->set_rate && clko_clocks[i]->round_rate) { clk->set_rate = _clk_parent_set_rate; clk->round_rate = _clk_parent_round_rate; } 
else { clk->set_rate = NULL; clk->round_rate = NULL; } return 0; } static struct clk clko_clk = { .set_parent = clko_set_parent, }; static struct clk dma_clk = { .parent = &hclk, .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, .enable = _clk_enable, .enable_reg = SCM_GCCR, .enable_shift = SCM_GCCR_DMA_CLK_EN_OFFSET, .disable = _clk_disable, }; static struct clk csi_clk = { .parent = &hclk, .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, .enable = _clk_enable, .enable_reg = SCM_GCCR, .enable_shift = SCM_GCCR_CSI_CLK_EN_OFFSET, .disable = _clk_disable, }; static struct clk mma_clk = { .parent = &hclk, .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, .enable = _clk_enable, .enable_reg = SCM_GCCR, .enable_shift = SCM_GCCR_MMA_CLK_EN_OFFSET, .disable = _clk_disable, }; static struct clk usbd_clk = { .parent = &clk48m, .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, .enable = _clk_enable, .enable_reg = SCM_GCCR, .enable_shift = SCM_GCCR_USBD_CLK_EN_OFFSET, .disable = _clk_disable, }; static struct clk gpt_clk = { .parent = &perclk[0], .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, }; static struct clk uart_clk = { .parent = &perclk[0], .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, }; static struct clk i2c_clk = { .parent = &hclk, .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, }; static struct clk spi_clk = { .parent = &perclk[1], .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, }; static struct clk sdhc_clk = { .parent = &perclk[1], .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, }; static struct clk lcdc_clk = { .parent = &perclk[1], .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, }; static struct clk mshc_clk = { .parent = &hclk, .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, }; static struct clk ssi_clk = 
{ .parent = &perclk[2], .round_rate = _clk_parent_round_rate, .set_rate = _clk_parent_set_rate, }; static struct clk rtc_clk = { .parent = &clk32, }; #define _REGISTER_CLOCK(d, n, c) \ { \ .dev_id = d, \ .con_id = n, \ .clk = &c, \ }, static struct clk_lookup lookups[] __initdata = { _REGISTER_CLOCK(NULL, "dma", dma_clk) _REGISTER_CLOCK("mx1-camera.0", NULL, csi_clk) _REGISTER_CLOCK(NULL, "mma", mma_clk) _REGISTER_CLOCK("imx_udc.0", NULL, usbd_clk) _REGISTER_CLOCK(NULL, "gpt", gpt_clk) _REGISTER_CLOCK("imx1-uart.0", NULL, uart_clk) _REGISTER_CLOCK("imx1-uart.1", NULL, uart_clk) _REGISTER_CLOCK("imx1-uart.2", NULL, uart_clk) _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk) _REGISTER_CLOCK("imx1-cspi.0", NULL, spi_clk) _REGISTER_CLOCK("imx1-cspi.1", NULL, spi_clk) _REGISTER_CLOCK("imx-mmc.0", NULL, sdhc_clk) _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk) _REGISTER_CLOCK(NULL, "mshc", mshc_clk) _REGISTER_CLOCK(NULL, "ssi", ssi_clk) _REGISTER_CLOCK("mxc_rtc.0", NULL, rtc_clk) }; int __init mx1_clocks_init(unsigned long fref) { unsigned int reg; /* disable clocks we are able to */ __raw_writel(0, SCM_GCCR); clk32_rate = fref; reg = __raw_readl(CCM_CSCR); /* detect clock reference for system PLL */ if (reg & CCM_CSCR_SYSTEM_SEL) { prem_clk.parent = &clk16m; } else { /* ensure that oscillator is disabled */ reg &= ~(1 << CCM_CSCR_OSC_EN_SHIFT); __raw_writel(reg, CCM_CSCR); prem_clk.parent = &clk32_premult; } /* detect reference for CLKO */ reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET; clko_clk.parent = (struct clk *)clko_clocks[reg]; clkdev_add_table(lookups, ARRAY_SIZE(lookups)); clk_enable(&hclk); clk_enable(&fclk); mxc_timer_init(&gpt_clk, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), MX1_TIM1_INT); return 0; }
gpl-2.0
visi0nary/mediatek
mt6732/kernel/net/irda/irlap.c
10503
34518
/********************************************************************* * * Filename: irlap.c * Version: 1.0 * Description: IrLAP implementation for Linux * Status: Stable * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Mon Aug 4 20:40:53 1997 * Modified at: Tue Dec 14 09:26:44 1999 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved. * Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/slab.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/random.h> #include <linux/module.h> #include <linux/seq_file.h> #include <net/irda/irda.h> #include <net/irda/irda_device.h> #include <net/irda/irqueue.h> #include <net/irda/irlmp.h> #include <net/irda/irlmp_frame.h> #include <net/irda/irlap_frame.h> #include <net/irda/irlap.h> #include <net/irda/timer.h> #include <net/irda/qos.h> static hashbin_t *irlap = NULL; int sysctl_slot_timeout = SLOT_TIMEOUT * 1000 / HZ; /* This is the delay of missed pf period before generating an event * to the application. The spec mandate 3 seconds, but in some cases * it's way too long. 
- Jean II */ int sysctl_warn_noreply_time = 3; extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb); static void __irlap_close(struct irlap_cb *self); static void irlap_init_qos_capabilities(struct irlap_cb *self, struct qos_info *qos_user); #ifdef CONFIG_IRDA_DEBUG static const char *const lap_reasons[] = { "ERROR, NOT USED", "LAP_DISC_INDICATION", "LAP_NO_RESPONSE", "LAP_RESET_INDICATION", "LAP_FOUND_NONE", "LAP_MEDIA_BUSY", "LAP_PRIMARY_CONFLICT", "ERROR, NOT USED", }; #endif /* CONFIG_IRDA_DEBUG */ int __init irlap_init(void) { /* Check if the compiler did its job properly. * May happen on some ARM configuration, check with Russell King. */ IRDA_ASSERT(sizeof(struct xid_frame) == 14, ;); IRDA_ASSERT(sizeof(struct test_frame) == 10, ;); IRDA_ASSERT(sizeof(struct ua_frame) == 10, ;); IRDA_ASSERT(sizeof(struct snrm_frame) == 11, ;); /* Allocate master array */ irlap = hashbin_new(HB_LOCK); if (irlap == NULL) { IRDA_ERROR("%s: can't allocate irlap hashbin!\n", __func__); return -ENOMEM; } return 0; } void irlap_cleanup(void) { IRDA_ASSERT(irlap != NULL, return;); hashbin_delete(irlap, (FREE_FUNC) __irlap_close); } /* * Function irlap_open (driver) * * Initialize IrLAP layer * */ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos, const char *hw_name) { struct irlap_cb *self; IRDA_DEBUG(4, "%s()\n", __func__); /* Initialize the irlap structure. */ self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); if (self == NULL) return NULL; self->magic = LAP_MAGIC; /* Make a binding between the layers */ self->netdev = dev; self->qos_dev = qos; /* Copy hardware name */ if(hw_name != NULL) { strlcpy(self->hw_name, hw_name, sizeof(self->hw_name)); } else { self->hw_name[0] = '\0'; } /* FIXME: should we get our own field? 
*/ dev->atalk_ptr = self; self->state = LAP_OFFLINE; /* Initialize transmit queue */ skb_queue_head_init(&self->txq); skb_queue_head_init(&self->txq_ultra); skb_queue_head_init(&self->wx_list); /* My unique IrLAP device address! */ /* We don't want the broadcast address, neither the NULL address * (most often used to signify "invalid"), and we don't want an * address already in use (otherwise connect won't be able * to select the proper link). - Jean II */ do { get_random_bytes(&self->saddr, sizeof(self->saddr)); } while ((self->saddr == 0x0) || (self->saddr == BROADCAST) || (hashbin_lock_find(irlap, self->saddr, NULL)) ); /* Copy to the driver */ memcpy(dev->dev_addr, &self->saddr, 4); init_timer(&self->slot_timer); init_timer(&self->query_timer); init_timer(&self->discovery_timer); init_timer(&self->final_timer); init_timer(&self->poll_timer); init_timer(&self->wd_timer); init_timer(&self->backoff_timer); init_timer(&self->media_busy_timer); irlap_apply_default_connection_parameters(self); self->N3 = 3; /* # connections attempts to try before giving up */ self->state = LAP_NDM; hashbin_insert(irlap, (irda_queue_t *) self, self->saddr, NULL); irlmp_register_link(self, self->saddr, &self->notify); return self; } EXPORT_SYMBOL(irlap_open); /* * Function __irlap_close (self) * * Remove IrLAP and all allocated memory. Stop any pending timers. 
* */ static void __irlap_close(struct irlap_cb *self) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Stop timers */ del_timer(&self->slot_timer); del_timer(&self->query_timer); del_timer(&self->discovery_timer); del_timer(&self->final_timer); del_timer(&self->poll_timer); del_timer(&self->wd_timer); del_timer(&self->backoff_timer); del_timer(&self->media_busy_timer); irlap_flush_all_queues(self); self->magic = 0; kfree(self); } /* * Function irlap_close (self) * * Remove IrLAP instance * */ void irlap_close(struct irlap_cb *self) { struct irlap_cb *lap; IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* We used to send a LAP_DISC_INDICATION here, but this was * racy. This has been move within irlmp_unregister_link() * itself. Jean II */ /* Kill the LAP and all LSAPs on top of it */ irlmp_unregister_link(self->saddr); self->notify.instance = NULL; /* Be sure that we manage to remove ourself from the hash */ lap = hashbin_remove(irlap, self->saddr, NULL); if (!lap) { IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__); return; } __irlap_close(lap); } EXPORT_SYMBOL(irlap_close); /* * Function irlap_connect_indication (self, skb) * * Another device is attempting to make a connection * */ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlap_init_qos_capabilities(self, NULL); /* No user QoS! 
*/ irlmp_link_connect_indication(self->notify.instance, self->saddr, self->daddr, &self->qos_tx, skb); } /* * Function irlap_connect_response (self, skb) * * Service user has accepted incoming connection * */ void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) { IRDA_DEBUG(4, "%s()\n", __func__); irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL); } /* * Function irlap_connect_request (self, daddr, qos_user, sniff) * * Request connection with another device, sniffing is not implemented * yet. * */ void irlap_connect_request(struct irlap_cb *self, __u32 daddr, struct qos_info *qos_user, int sniff) { IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); self->daddr = daddr; /* * If the service user specifies QoS values for this connection, * then use them */ irlap_init_qos_capabilities(self, qos_user); if ((self->state == LAP_NDM) && !self->media_busy) irlap_do_event(self, CONNECT_REQUEST, NULL, NULL); else self->connect_pending = TRUE; } /* * Function irlap_connect_confirm (self, skb) * * Connection request has been accepted * */ void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); irlmp_link_connect_confirm(self->notify.instance, &self->qos_tx, skb); } /* * Function irlap_data_indication (self, skb) * * Received data frames from IR-port, so we just pass them up to * IrLMP for further processing * */ void irlap_data_indication(struct irlap_cb *self, struct sk_buff *skb, int unreliable) { /* Hide LAP header from IrLMP layer */ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); irlmp_link_data_indication(self->notify.instance, skb, unreliable); } /* * Function irlap_data_request (self, skb) * * Queue data for transmission, must wait until XMIT state * */ void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb, 
int unreliable) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_DEBUG(3, "%s()\n", __func__); IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), return;); skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); /* * Must set frame format now so that the rest of the code knows * if its dealing with an I or an UI frame */ if (unreliable) skb->data[1] = UI_FRAME; else skb->data[1] = I_FRAME; /* Don't forget to refcount it - see irlmp_connect_request(). */ skb_get(skb); /* Add at the end of the queue (keep ordering) - Jean II */ skb_queue_tail(&self->txq, skb); /* * Send event if this frame only if we are in the right state * FIXME: udata should be sent first! (skb_queue_head?) */ if ((self->state == LAP_XMIT_P) || (self->state == LAP_XMIT_S)) { /* If we are not already processing the Tx queue, trigger * transmission immediately - Jean II */ if((skb_queue_len(&self->txq) <= 1) && (!self->local_busy)) irlap_do_event(self, DATA_REQUEST, skb, NULL); /* Otherwise, the packets will be sent normally at the * next pf-poll - Jean II */ } } /* * Function irlap_unitdata_request (self, skb) * * Send Ultra data. This is data that must be sent outside any connection * */ #ifdef CONFIG_IRDA_ULTRA void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_DEBUG(3, "%s()\n", __func__); IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), return;); skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); skb->data[0] = CBROADCAST; skb->data[1] = UI_FRAME; /* Don't need to refcount, see irlmp_connless_data_request() */ skb_queue_tail(&self->txq_ultra, skb); irlap_do_event(self, SEND_UI_FRAME, NULL, NULL); } #endif /*CONFIG_IRDA_ULTRA */ /* * Function irlap_udata_indication (self, skb) * * Receive Ultra data. 
This is data that is received outside any connection * */ #ifdef CONFIG_IRDA_ULTRA void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) { IRDA_DEBUG(1, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); /* Hide LAP header from IrLMP layer */ skb_pull(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); irlmp_link_unitdata_indication(self->notify.instance, skb); } #endif /* CONFIG_IRDA_ULTRA */ /* * Function irlap_disconnect_request (void) * * Request to disconnect connection by service user */ void irlap_disconnect_request(struct irlap_cb *self) { IRDA_DEBUG(3, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Don't disconnect until all data frames are successfully sent */ if (!skb_queue_empty(&self->txq)) { self->disconnect_pending = TRUE; return; } /* Check if we are in the right state for disconnecting */ switch (self->state) { case LAP_XMIT_P: /* FALLTHROUGH */ case LAP_XMIT_S: /* FALLTHROUGH */ case LAP_CONN: /* FALLTHROUGH */ case LAP_RESET_WAIT: /* FALLTHROUGH */ case LAP_RESET_CHECK: irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL); break; default: IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__); self->disconnect_pending = TRUE; break; } } /* * Function irlap_disconnect_indication (void) * * Disconnect request from other device * */ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) { IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Flush queues */ irlap_flush_all_queues(self); switch (reason) { case LAP_RESET_INDICATION: IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__); irlap_do_event(self, RESET_REQUEST, NULL, NULL); break; case LAP_NO_RESPONSE: /* FALLTHROUGH */ case LAP_DISC_INDICATION: /* FALLTHROUGH */ case LAP_FOUND_NONE: /* FALLTHROUGH */ case 
LAP_MEDIA_BUSY: irlmp_link_disconnect_indication(self->notify.instance, self, reason, NULL); break; default: IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason); } } /* * Function irlap_discovery_request (gen_addr_bit) * * Start one single discovery operation. * */ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) { struct irlap_info info; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots); IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) || (discovery->nslots == 8) || (discovery->nslots == 16), return;); /* Discovery is only possible in NDM mode */ if (self->state != LAP_NDM) { IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n", __func__); irlap_discovery_confirm(self, NULL); /* Note : in theory, if we are not in NDM, we could postpone * the discovery like we do for connection request. * In practice, it's not worth it. If the media was busy, * it's likely next time around it won't be busy. If we are * in REPLY state, we will get passive discovery info & event. * Jean II */ return; } /* Check if last discovery request finished in time, or if * it was aborted due to the media busy flag. 
*/ if (self->discovery_log != NULL) { hashbin_delete(self->discovery_log, (FREE_FUNC) kfree); self->discovery_log = NULL; } /* All operations will occur at predictable time, no need to lock */ self->discovery_log = hashbin_new(HB_NOLOCK); if (self->discovery_log == NULL) { IRDA_WARNING("%s(), Unable to allocate discovery log!\n", __func__); return; } info.S = discovery->nslots; /* Number of slots */ info.s = 0; /* Current slot */ self->discovery_cmd = discovery; info.discovery = discovery; /* sysctl_slot_timeout bounds are checked in irsysctl.c - Jean II */ self->slot_timeout = sysctl_slot_timeout * HZ / 1000; irlap_do_event(self, DISCOVERY_REQUEST, NULL, &info); } /* * Function irlap_discovery_confirm (log) * * A device has been discovered in front of this station, we * report directly to LMP. */ void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(self->notify.instance != NULL, return;); /* * Check for successful discovery, since we are then allowed to clear * the media busy condition (IrLAP 6.13.4 - p.94). This should allow * us to make connection attempts much faster and easier (i.e. no * collisions). * Setting media busy to false will also generate an event allowing * to process pending events in NDM state machine. * Note : the spec doesn't define what's a successful discovery is. * If we want Ultra to work, it's successful even if there is * nobody discovered - Jean II */ if (discovery_log) irda_device_set_media_busy(self->netdev, FALSE); /* Inform IrLMP */ irlmp_link_discovery_confirm(self->notify.instance, discovery_log); } /* * Function irlap_discovery_indication (log) * * Somebody is trying to discover us! 
* */ void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); IRDA_ASSERT(self->notify.instance != NULL, return;); /* A device is very likely to connect immediately after it performs * a successful discovery. This means that in our case, we are much * more likely to receive a connection request over the medium. * So, we backoff to avoid collisions. * IrLAP spec 6.13.4 suggest 100ms... * Note : this little trick actually make a *BIG* difference. If I set * my Linux box with discovery enabled and one Ultra frame sent every * second, my Palm has no trouble connecting to it every time ! * Jean II */ irda_device_set_media_busy(self->netdev, SMALL); irlmp_link_discovery_indication(self->notify.instance, discovery); } /* * Function irlap_status_indication (quality_of_link) */ void irlap_status_indication(struct irlap_cb *self, int quality_of_link) { switch (quality_of_link) { case STATUS_NO_ACTIVITY: IRDA_MESSAGE("IrLAP, no activity on link!\n"); break; case STATUS_NOISY: IRDA_MESSAGE("IrLAP, noisy link!\n"); break; default: break; } irlmp_status_indication(self->notify.instance, quality_of_link, LOCK_NO_CHANGE); } /* * Function irlap_reset_indication (void) */ void irlap_reset_indication(struct irlap_cb *self) { IRDA_DEBUG(1, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); if (self->state == LAP_RESET_WAIT) irlap_do_event(self, RESET_REQUEST, NULL, NULL); else irlap_do_event(self, RESET_RESPONSE, NULL, NULL); } /* * Function irlap_reset_confirm (void) */ void irlap_reset_confirm(void) { IRDA_DEBUG(1, "%s()\n", __func__); } /* * Function irlap_generate_rand_time_slot (S, s) * * Generate a random time slot between s and S-1 where * S = Number of slots (0 -> S-1) * s = Current slot */ int irlap_generate_rand_time_slot(int S, int s) { 
static int rand; int slot; IRDA_ASSERT((S - s) > 0, return 0;); rand += jiffies; rand ^= (rand << 12); rand ^= (rand >> 20); slot = s + rand % (S-s); IRDA_ASSERT((slot >= s) || (slot < S), return 0;); return slot; } /* * Function irlap_update_nr_received (nr) * * Remove all acknowledged frames in current window queue. This code is * not intuitive and you should not try to change it. If you think it * contains bugs, please mail a patch to the author instead. */ void irlap_update_nr_received(struct irlap_cb *self, int nr) { struct sk_buff *skb = NULL; int count = 0; /* * Remove all the ack-ed frames from the window queue. */ /* * Optimize for the common case. It is most likely that the receiver * will acknowledge all the frames we have sent! So in that case we * delete all frames stored in window. */ if (nr == self->vs) { while ((skb = skb_dequeue(&self->wx_list)) != NULL) { dev_kfree_skb(skb); } /* The last acked frame is the next to send minus one */ self->va = nr - 1; } else { /* Remove all acknowledged frames in current window */ while ((skb_peek(&self->wx_list) != NULL) && (((self->va+1) % 8) != nr)) { skb = skb_dequeue(&self->wx_list); dev_kfree_skb(skb); self->va = (self->va + 1) % 8; count++; } } /* Advance window */ self->window = self->window_size - skb_queue_len(&self->wx_list); } /* * Function irlap_validate_ns_received (ns) * * Validate the next to send (ns) field from received frame. */ int irlap_validate_ns_received(struct irlap_cb *self, int ns) { /* ns as expected? */ if (ns == self->vr) return NS_EXPECTED; /* * Stations are allowed to treat invalid NS as unexpected NS * IrLAP, Recv ... with-invalid-Ns. p. 84 */ return NS_UNEXPECTED; /* return NR_INVALID; */ } /* * Function irlap_validate_nr_received (nr) * * Validate the next to receive (nr) field from received frame. * */ int irlap_validate_nr_received(struct irlap_cb *self, int nr) { /* nr as expected? 
*/ if (nr == self->vs) { IRDA_DEBUG(4, "%s(), expected!\n", __func__); return NR_EXPECTED; } /* * unexpected nr? (but within current window), first we check if the * ns numbers of the frames in the current window wrap. */ if (self->va < self->vs) { if ((nr >= self->va) && (nr <= self->vs)) return NR_UNEXPECTED; } else { if ((nr >= self->va) || (nr <= self->vs)) return NR_UNEXPECTED; } /* Invalid nr! */ return NR_INVALID; } /* * Function irlap_initiate_connection_state () * * Initialize the connection state parameters * */ void irlap_initiate_connection_state(struct irlap_cb *self) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Next to send and next to receive */ self->vs = self->vr = 0; /* Last frame which got acked (0 - 1) % 8 */ self->va = 7; self->window = 1; self->remote_busy = FALSE; self->retry_count = 0; } /* * Function irlap_wait_min_turn_around (self, qos) * * Wait negotiated minimum turn around time, this function actually sets * the number of BOS's that must be sent before the next transmitted * frame in order to delay for the specified amount of time. This is * done to avoid using timers, and the forbidden udelay! */ void irlap_wait_min_turn_around(struct irlap_cb *self, struct qos_info *qos) { __u32 min_turn_time; __u32 speed; /* Get QoS values. 
*/ speed = qos->baud_rate.value; min_turn_time = qos->min_turn_time.value; /* No need to calculate XBOFs for speeds over 115200 bps */ if (speed > 115200) { self->mtt_required = min_turn_time; return; } /* * Send additional BOF's for the next frame for the requested * min turn time, so now we must calculate how many chars (XBOF's) we * must send for the requested time period (min turn time) */ self->xbofs_delay = irlap_min_turn_time_in_bytes(speed, min_turn_time); } /* * Function irlap_flush_all_queues (void) * * Flush all queues * */ void irlap_flush_all_queues(struct irlap_cb *self) { struct sk_buff* skb; IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Free transmission queue */ while ((skb = skb_dequeue(&self->txq)) != NULL) dev_kfree_skb(skb); while ((skb = skb_dequeue(&self->txq_ultra)) != NULL) dev_kfree_skb(skb); /* Free sliding window buffered packets */ while ((skb = skb_dequeue(&self->wx_list)) != NULL) dev_kfree_skb(skb); } /* * Function irlap_setspeed (self, speed) * * Change the speed of the IrDA port * */ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now) { struct sk_buff *skb; IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); self->speed = speed; /* Change speed now, or just piggyback speed on frames */ if (now) { /* Send down empty frame to trigger speed change */ skb = alloc_skb(0, GFP_ATOMIC); if (skb) irlap_queue_xmit(self, skb); } } /* * Function irlap_init_qos_capabilities (self, qos) * * Initialize QoS for this IrLAP session, What we do is to compute the * intersection of the QoS capabilities for the user, driver and for * IrLAP itself. Normally, IrLAP will not specify any values, but it can * be used to restrict certain values. 
*/ static void irlap_init_qos_capabilities(struct irlap_cb *self, struct qos_info *qos_user) { IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(self->netdev != NULL, return;); /* Start out with the maximum QoS support possible */ irda_init_max_qos_capabilies(&self->qos_rx); /* Apply drivers QoS capabilities */ irda_qos_compute_intersection(&self->qos_rx, self->qos_dev); /* * Check for user supplied QoS parameters. The service user is only * allowed to supply these values. We check each parameter since the * user may not have set all of them. */ if (qos_user) { IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__); if (qos_user->baud_rate.bits) self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits; if (qos_user->max_turn_time.bits) self->qos_rx.max_turn_time.bits &= qos_user->max_turn_time.bits; if (qos_user->data_size.bits) self->qos_rx.data_size.bits &= qos_user->data_size.bits; if (qos_user->link_disc_time.bits) self->qos_rx.link_disc_time.bits &= qos_user->link_disc_time.bits; } /* Use 500ms in IrLAP for now */ self->qos_rx.max_turn_time.bits &= 0x01; /* Set data size */ /*self->qos_rx.data_size.bits &= 0x03;*/ irda_qos_bits_to_value(&self->qos_rx); } /* * Function irlap_apply_default_connection_parameters (void, now) * * Use the default connection and transmission parameters */ void irlap_apply_default_connection_parameters(struct irlap_cb *self) { IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* xbofs : Default value in NDM */ self->next_bofs = 12; self->bofs_count = 12; /* NDM Speed is 9600 */ irlap_change_speed(self, 9600, TRUE); /* Set mbusy when going to NDM state */ irda_device_set_media_busy(self->netdev, TRUE); /* * Generate random connection address for this session, which must * be 7 bits wide and different from 0x00 and 0xfe */ while ((self->caddr == 0x00) || (self->caddr == 0xfe)) { get_random_bytes(&self->caddr, 
sizeof(self->caddr)); self->caddr &= 0xfe; } /* Use default values until connection has been negitiated */ self->slot_timeout = sysctl_slot_timeout; self->final_timeout = FINAL_TIMEOUT; self->poll_timeout = POLL_TIMEOUT; self->wd_timeout = WD_TIMEOUT; /* Set some default values */ self->qos_tx.baud_rate.value = 9600; self->qos_rx.baud_rate.value = 9600; self->qos_tx.max_turn_time.value = 0; self->qos_rx.max_turn_time.value = 0; self->qos_tx.min_turn_time.value = 0; self->qos_rx.min_turn_time.value = 0; self->qos_tx.data_size.value = 64; self->qos_rx.data_size.value = 64; self->qos_tx.window_size.value = 1; self->qos_rx.window_size.value = 1; self->qos_tx.additional_bofs.value = 12; self->qos_rx.additional_bofs.value = 12; self->qos_tx.link_disc_time.value = 0; self->qos_rx.link_disc_time.value = 0; irlap_flush_all_queues(self); self->disconnect_pending = FALSE; self->connect_pending = FALSE; } /* * Function irlap_apply_connection_parameters (qos, now) * * Initialize IrLAP with the negotiated QoS values * * If 'now' is false, the speed and xbofs will be changed after the next * frame is sent. 
 * If 'now' is true, the speed and xbofs is changed immediately
 */
void irlap_apply_connection_parameters(struct irlap_cb *self, int now)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LAP_MAGIC, return;);

	/* Set the negotiated xbofs value */
	self->next_bofs = self->qos_tx.additional_bofs.value;
	if (now)
		self->bofs_count = self->next_bofs;

	/* Set the negotiated link speed (may need the new xbofs value) */
	irlap_change_speed(self, self->qos_tx.baud_rate.value, now);

	self->window_size = self->qos_tx.window_size.value;
	self->window = self->qos_tx.window_size.value;

#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
	/*
	 * Calculate how many bytes it is possible to transmit before the
	 * link must be turned around
	 */
	self->line_capacity =
		irlap_max_line_capacity(self->qos_tx.baud_rate.value,
					self->qos_tx.max_turn_time.value);
	self->bytes_left = self->line_capacity;
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */

	/*
	 * Initialize timeout values, some of the rules are listed on
	 * page 92 in IrLAP.
	 */
	IRDA_ASSERT(self->qos_tx.max_turn_time.value != 0, return;);
	IRDA_ASSERT(self->qos_rx.max_turn_time.value != 0, return;);
	/* The poll timeout applies only to the primary station.
	 * It defines the maximum time the primary stay in XMIT mode
	 * before timeout and turning the link around (sending a RR).
	 * Or, this is how much we can keep the pf bit in primary mode.
	 * Therefore, it must be lower or equal than our *OWN* max turn around.
	 * Jean II */
	self->poll_timeout = self->qos_tx.max_turn_time.value * HZ / 1000;
	/* The Final timeout applies only to the primary station.
	 * It defines the maximum time the primary wait (mostly in RECV mode)
	 * for an answer from the secondary station before polling it again.
	 * Therefore, it must be greater or equal than our *PARTNER*
	 * max turn around time - Jean II */
	self->final_timeout = self->qos_rx.max_turn_time.value * HZ / 1000;
	/* The Watchdog Bit timeout applies only to the secondary station.
	 * It defines the maximum time the secondary wait (mostly in RECV mode)
	 * for poll from the primary station before getting annoyed.
	 * Therefore, it must be greater or equal than our *PARTNER*
	 * max turn around time - Jean II */
	self->wd_timeout = self->final_timeout * 2;

	/*
	 * N1 and N2 are maximum retry count for *both* the final timer
	 * and the wd timer (with a factor 2) as defined above.
	 * After N1 retry of a timer, we give a warning to the user.
	 * After N2 retry, we consider the link dead and disconnect it.
	 * Jean II
	 */

	/*
	 * Set N1 to 0 if Link Disconnect/Threshold Time = 3 and set it to
	 * 3 seconds otherwise. See page 71 in IrLAP for more details.
	 * Actually, it's not always 3 seconds, as we allow to set
	 * it via sysctl... Max maxtt is 500ms, and N1 need to be multiple
	 * of 2, so 1 second is minimum we can allow. - Jean II
	 */
	if (self->qos_tx.link_disc_time.value == sysctl_warn_noreply_time)
		/*
		 * If we set N1 to 0, it will trigger immediately, which is
		 * not what we want. What we really want is to disable it,
		 * Jean II
		 */
		self->N1 = -2; /* Disable - Need to be multiple of 2*/
	else
		self->N1 = sysctl_warn_noreply_time * 1000 /
			self->qos_rx.max_turn_time.value;

	IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1);

	/* Set N2 to match our own disconnect time */
	self->N2 = self->qos_tx.link_disc_time.value * 1000 /
		self->qos_rx.max_turn_time.value;
	IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2);
}

#ifdef CONFIG_PROC_FS
/* Per-open cursor for the /proc seq_file walk over the LAP hashbin. */
struct irlap_iter_state {
	int id;		/* zero-based index of the current irlap instance */
};

/*
 * seq_file start: take the hashbin lock (held until irlap_seq_stop())
 * and walk forward to the *pos'th entry, or NULL when past the end.
 */
static void *irlap_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct irlap_iter_state *iter = seq->private;
	struct irlap_cb *self;

	/* Protect our access to the tsap list */
	spin_lock_irq(&irlap->hb_spinlock);
	iter->id = 0;

	for (self = (struct irlap_cb *) hashbin_get_first(irlap);
	     self;
	     self = (struct irlap_cb *) hashbin_get_next(irlap)) {
		if (iter->id == *pos)
			break;
		++iter->id;
	}

	return self;
}

/* seq_file next: advance the hashbin cursor by one entry. */
static void *irlap_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct irlap_iter_state *iter = seq->private;

	++*pos;
	++iter->id;
	return (void *) hashbin_get_next(irlap);
}

/* seq_file stop: release the lock taken in irlap_seq_start(). */
static void irlap_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock_irq(&irlap->hb_spinlock);
}

/* Dump one LAP instance: state, addresses, window and QoS parameters. */
static int irlap_seq_show(struct seq_file *seq, void *v)
{
	const struct irlap_iter_state *iter = seq->private;
	const struct irlap_cb *self = v;

	IRDA_ASSERT(self->magic == LAP_MAGIC, return -EINVAL;);

	seq_printf(seq, "irlap%d ", iter->id);
	seq_printf(seq, "state: %s\n", irlap_state[self->state]);

	seq_printf(seq, " device name: %s, ",
		   (self->netdev) ? self->netdev->name : "bug");
	seq_printf(seq, "hardware name: %s\n", self->hw_name);

	seq_printf(seq, " caddr: %#02x, ", self->caddr);
	seq_printf(seq, "saddr: %#08x, ", self->saddr);
	seq_printf(seq, "daddr: %#08x\n", self->daddr);

	seq_printf(seq, " win size: %d, ", self->window_size);
	seq_printf(seq, "win: %d, ", self->window);
#ifdef CONFIG_IRDA_DYNAMIC_WINDOW
	seq_printf(seq, "line capacity: %d, ", self->line_capacity);
	seq_printf(seq, "bytes left: %d\n", self->bytes_left);
#endif /* CONFIG_IRDA_DYNAMIC_WINDOW */
	seq_printf(seq, " tx queue len: %d ", skb_queue_len(&self->txq));
	seq_printf(seq, "win queue len: %d ", skb_queue_len(&self->wx_list));
	seq_printf(seq, "rbusy: %s", self->remote_busy ? "TRUE" : "FALSE");
	seq_printf(seq, " mbusy: %s\n", self->media_busy ? "TRUE" : "FALSE");

	seq_printf(seq, " retrans: %d ", self->retry_count);
	seq_printf(seq, "vs: %d ", self->vs);
	seq_printf(seq, "vr: %d ", self->vr);
	seq_printf(seq, "va: %d\n", self->va);

	seq_printf(seq, " qos\tbps\tmaxtt\tdsize\twinsize\taddbofs\tmintt\tldisc\tcomp\n");

	seq_printf(seq, " tx\t%d\t", self->qos_tx.baud_rate.value);
	seq_printf(seq, "%d\t", self->qos_tx.max_turn_time.value);
	seq_printf(seq, "%d\t", self->qos_tx.data_size.value);
	seq_printf(seq, "%d\t", self->qos_tx.window_size.value);
	seq_printf(seq, "%d\t", self->qos_tx.additional_bofs.value);
	seq_printf(seq, "%d\t", self->qos_tx.min_turn_time.value);
	seq_printf(seq, "%d\t", self->qos_tx.link_disc_time.value);
	seq_printf(seq, "\n");

	seq_printf(seq, " rx\t%d\t", self->qos_rx.baud_rate.value);
	seq_printf(seq, "%d\t", self->qos_rx.max_turn_time.value);
	seq_printf(seq, "%d\t", self->qos_rx.data_size.value);
	seq_printf(seq, "%d\t", self->qos_rx.window_size.value);
	seq_printf(seq, "%d\t", self->qos_rx.additional_bofs.value);
	seq_printf(seq, "%d\t", self->qos_rx.min_turn_time.value);
	seq_printf(seq, "%d\n", self->qos_rx.link_disc_time.value);

	return 0;
}

static const struct seq_operations irlap_seq_ops = {
	.start  = irlap_seq_start,
	.next   = irlap_seq_next,
	.stop   = irlap_seq_stop,
	.show   = irlap_seq_show,
};

/* /proc open hook: refuse if the LAP hashbin was never created. */
static int irlap_seq_open(struct inode *inode, struct file *file)
{
	if (irlap == NULL)
		return -EINVAL;

	return seq_open_private(file, &irlap_seq_ops,
			sizeof(struct irlap_iter_state));
}

const struct file_operations irlap_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = irlap_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_PROC_FS */
gpl-2.0
Nihhaar/android_kernel_xiaomi_mocha
drivers/misc/cb710/sgbuf2.c
12807
3495
/* * cb710/sgbuf2.c * * Copyright by Michał Mirosław, 2008-2009 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/cb710.h> static bool sg_dwiter_next(struct sg_mapping_iter *miter) { if (sg_miter_next(miter)) { miter->consumed = 0; return true; } else return false; } static bool sg_dwiter_is_at_end(struct sg_mapping_iter *miter) { return miter->length == miter->consumed && !sg_dwiter_next(miter); } static uint32_t sg_dwiter_read_buffer(struct sg_mapping_iter *miter) { size_t len, left = 4; uint32_t data; void *addr = &data; do { len = min(miter->length - miter->consumed, left); memcpy(addr, miter->addr + miter->consumed, len); miter->consumed += len; left -= len; if (!left) return data; addr += len; } while (sg_dwiter_next(miter)); memset(addr, 0, left); return data; } static inline bool needs_unaligned_copy(const void *ptr) { #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS return false; #else return ((ptr - NULL) & 3) != 0; #endif } static bool sg_dwiter_get_next_block(struct sg_mapping_iter *miter, uint32_t **ptr) { size_t len; if (sg_dwiter_is_at_end(miter)) return true; len = miter->length - miter->consumed; if (likely(len >= 4 && !needs_unaligned_copy( miter->addr + miter->consumed))) { *ptr = miter->addr + miter->consumed; miter->consumed += 4; return true; } return false; } /** * cb710_sg_dwiter_read_next_block() - get next 32-bit word from sg buffer * @miter: sg mapping iterator used for reading * * Description: * Returns 32-bit word starting at byte pointed to by @miter@ * handling any alignment issues. Bytes past the buffer's end * are not accessed (read) but are returned as zeroes. @miter@ * is advanced by 4 bytes or to the end of buffer whichever is * closer. * * Context: * Same requirements as in sg_miter_next(). * * Returns: * 32-bit word just read. 
*/ uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter) { uint32_t *ptr = NULL; if (likely(sg_dwiter_get_next_block(miter, &ptr))) return ptr ? *ptr : 0; return sg_dwiter_read_buffer(miter); } EXPORT_SYMBOL_GPL(cb710_sg_dwiter_read_next_block); static void sg_dwiter_write_slow(struct sg_mapping_iter *miter, uint32_t data) { size_t len, left = 4; void *addr = &data; do { len = min(miter->length - miter->consumed, left); memcpy(miter->addr, addr, len); miter->consumed += len; left -= len; if (!left) return; addr += len; } while (sg_dwiter_next(miter)); } /** * cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer * @miter: sg mapping iterator used for writing * * Description: * Writes 32-bit word starting at byte pointed to by @miter@ * handling any alignment issues. Bytes which would be written * past the buffer's end are silently discarded. @miter@ is * advanced by 4 bytes or to the end of buffer whichever is closer. * * Context: * Same requirements as in sg_miter_next(). */ void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data) { uint32_t *ptr = NULL; if (likely(sg_dwiter_get_next_block(miter, &ptr))) { if (ptr) *ptr = data; else return; } else sg_dwiter_write_slow(miter, data); } EXPORT_SYMBOL_GPL(cb710_sg_dwiter_write_next_block);
gpl-2.0
gautierhattenberger/paparazzi
sw/ground_segment/joystick/sdl_stick.c
8
7981
/*
 * Basic SDL joystick lib
 *
 * based on usb_stick.c
 * Copyright (C) 2012 The Paparazzi Team
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "sdl_stick.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <math.h>

//needed for joystick interface
#include <SDL/SDL.h>

#ifdef STICK_DBG
#define dbgprintf fprintf
#else
#define dbgprintf(x ...)
#endif

#define MIN_ABS_CODE ABS_X
#define MAX_ABS_CODE ABS_MAX+1
/* Axes are republished on the range [-ABS_MID_VALUE, ABS_MAX_VALUE - ABS_MID_VALUE]. */
#define ABS_MAX_VALUE 254
#define ABS_MID_VALUE 127

#define BUTTON_COUNT STICK_BUTTON_COUNT
#define AXIS_COUNT STICK_AXIS_COUNT

SDL_Joystick *sdl_joystick;
SDL_Event sdl_event;

/* Latest observed state (updated by stick_read()/stick_check_axis()). */
int8_t stick_axis_values[AXIS_COUNT] = {0, 0, 0, 0, 0, 0};
uint8_t stick_hat_value = 0;
int32_t stick_button_values = 0;   /* bit n set = button n pressed */
int8_t stick_axis_moved[AXIS_COUNT] = {0};

int stick_axis_count = 0;
int stick_button_count = 0;
int32_t axis_min[AXIS_COUNT], axis_max[AXIS_COUNT];

int min(int x, int y);

/*
 * Open the SDL joystick at device_index and probe its capabilities.
 * Fills stick_button_count / stick_axis_count (clamped to the compile-time
 * maxima) and the per-axis raw-range tables.
 * Returns 0 on success, 1 if the device could not be opened.
 */
int init_sdl_device(int device_index)
{
  int cnt;
  const char *name;

  stick_button_count = 0;
  stick_axis_count = 0;

  /* Open the device with SDL */
  sdl_joystick = SDL_JoystickOpen(device_index);
  if (sdl_joystick == NULL) {
    dbgprintf(stderr, "Joystick corresponding to SDL Index %d failed to open! Exiting.\n", device_index);
    return (1);
  }

  /* How many buttons available */
  stick_button_count = min(SDL_JoystickNumButtons(sdl_joystick), BUTTON_COUNT);
  dbgprintf(stderr, "Available button: %d (0x%x)\n", stick_button_count, stick_button_count);
  if (stick_button_count == 0) {
    dbgprintf(stderr, "ERROR: no suitable buttons found [%s:%d]\n", __FILE__, __LINE__);
  }

  /* How many POV hats available */
  int stick_hat_count = SDL_JoystickNumHats(sdl_joystick);
  dbgprintf(stderr, "Available hats: %d (0x%x)\n", stick_hat_count, stick_hat_count);
  if (stick_hat_count > 1) {
    dbgprintf(stderr, "ERROR: only one POV hat supported [%s:%d]\n", __FILE__, __LINE__);
  }

  /* How many axes available */
  stick_axis_count = min(SDL_JoystickNumAxes(sdl_joystick), AXIS_COUNT);
  dbgprintf(stderr, "Available axes: %d (0x%x)\n", stick_axis_count, stick_axis_count);
  /* BUGFIX: this used to test stick_button_count, so a device with many
   * buttons but no axes passed silently while a 1-button/many-axes device
   * was wrongly reported. */
  if (stick_axis_count < 2) {
    dbgprintf(stderr, "ERROR: not enough suitable axes found [%s:%d]\n", __FILE__, __LINE__);
  }

  /* Axis param */
  for (cnt = 0; cnt < stick_axis_count; cnt++) {
    // with joystick interface, all axes are signed 16 bit with full range
    axis_min[cnt] = -32768;
    axis_max[cnt] = 32768;
    dbgprintf(stderr, "Axis %d : parameters = [%d,%d]\n", cnt, axis_min[cnt], axis_max[cnt]);
  }

  /* Get the device name */
  name = SDL_JoystickName(device_index);
  if (name == NULL) {
    dbgprintf(stderr, "Error getting name of device with SDL index %i.\n", device_index);
  } else {
    printf("Input device name: \"%s\" on SDL device \"%i\"\n", name, device_index);
  }

  return (0);
}

/*
 * Drain the SDL event queue and refresh stick_button_values,
 * stick_hat_value and stick_axis_values.  Always returns 0;
 * exits the process on SDL_QUIT.
 */
int stick_read(void)
{
  int cnt;

  while (SDL_PollEvent(&sdl_event)) {
    switch (sdl_event.type) {

      case SDL_JOYBUTTONDOWN:   //falls through to JOYBUTTONUP
      case SDL_JOYBUTTONUP:
        for (cnt = 0; cnt < stick_button_count; cnt++) {
          if (sdl_event.jbutton.button == cnt) {
            if (sdl_event.jbutton.state == SDL_PRESSED) {
              stick_button_values |= (1 << cnt);   // Set bit
            } else {
              stick_button_values &= ~(1 << cnt);  // Clear bit
            }
            break;
          }
        }
        break;

      case SDL_JOYHATMOTION:
        // only one hat (with index 0) supported
        if (sdl_event.jhat.hat == 0) {
          stick_hat_value = sdl_event.jhat.value;
          break;
        }
        break;

      case SDL_JOYAXISMOTION:
        for (cnt = 0; cnt < stick_axis_count; cnt++) {
          if (sdl_event.jaxis.axis == cnt) {
            /* rescale raw value from [axis_min, axis_max] to [-127, 127] */
            stick_axis_values[cnt] = (((sdl_event.jaxis.value - axis_min[cnt]) * ABS_MAX_VALUE)
                                      / (axis_max[cnt] - axis_min[cnt])) - ABS_MID_VALUE;
            break;
          }
        }
        break;

      case SDL_QUIT:
        printf("Quitting...\n");
        exit(1);
        break;

      default:
        //do nothing
        //printf("unknown SDL event!!!\n");
        break;
    }
  }

  dbgprintf(stderr, "buttons ");
  for (cnt = 0; cnt < stick_button_count; cnt++) {
    dbgprintf(stderr, "%d ", (stick_button_values >> cnt) & 1);
  }
  dbgprintf(stderr, "| hat ");
  dbgprintf(stderr, "%d ", stick_hat_value);
  dbgprintf(stderr, "| axes ");
  for (cnt = 0; cnt < stick_axis_count; cnt++) {
    dbgprintf(stderr, "%d ", stick_axis_values[cnt]);
  }
  dbgprintf(stderr, "\n");

  return 0;
}

/*
 * Poll axis events only, marking each axis that produced motion.
 * Returns 1 once every detected axis has moved at least once
 * (used for calibration / axis discovery), 0 otherwise.
 */
int stick_check_axis(void)
{
  int cnt;

  while (SDL_PollEvent(&sdl_event)) {
    switch (sdl_event.type) {
      case SDL_JOYAXISMOTION:
        for (cnt = 0; cnt < stick_axis_count; cnt++) {
          if (sdl_event.jaxis.axis == cnt) {
            stick_axis_values[cnt] = (((sdl_event.jaxis.value - axis_min[cnt]) * ABS_MAX_VALUE)
                                      / (axis_max[cnt] - axis_min[cnt])) - ABS_MID_VALUE;
            stick_axis_moved[cnt] = 1;
            break;
          }
        }
        break;

      case SDL_QUIT:
        printf("Quitting...\n");
        exit(1);
        break;

      default:
        //do nothing
        break;
    }
  }

  for (cnt = 0; cnt < stick_axis_count; cnt++) {
    if (stick_axis_moved[cnt] == 0) {
      return 0;
    }
  }
  return 1;
}

/*
 * Initialize SDL (joystick + video-for-events), mask all non-joystick
 * events and open the joystick at device_index, falling back to scanning
 * indices [0, STICK_INPUT_DEV_MAX) if that fails.
 * Returns 0 on success, 1 if no usable joystick was found.
 */
int stick_init(int device_index)
{
  int cnt = 0;

  /* Initialize SDL with joystick support and event support (through video) */
  if (SDL_Init(SDL_INIT_JOYSTICK | SDL_INIT_VIDEO) < 0) {
    printf("Could not initialize SDL: %s.\n", SDL_GetError());
    exit(-1);
  }

  //Quit SDL at exit
  atexit(SDL_Quit);

  //Start the event handler, disable all but joystick events and quit handler
  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
  SDL_EventState(SDL_KEYDOWN, SDL_IGNORE);
  SDL_EventState(SDL_KEYUP, SDL_IGNORE);
  SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
  SDL_EventState(SDL_MOUSEBUTTONDOWN, SDL_IGNORE);
  SDL_EventState(SDL_MOUSEBUTTONUP, SDL_IGNORE);
  //SDL_EventState(SDL_JOYAXISMOTION,SDL_IGNORE);
  //SDL_EventState(SDL_JOYBALLMOTION,SDL_IGNORE);
  //SDL_EventState(SDL_JOYHATMOTION,SDL_IGNORE);
  //SDL_EventState(SDL_JOYBUTTONDOWN,SDL_IGNORE);
  //SDL_EventState(SDL_JOYBUTTONUP,SDL_IGNORE);
  SDL_EventState(SDL_VIDEORESIZE, SDL_IGNORE);
  SDL_EventState(SDL_VIDEOEXPOSE, SDL_IGNORE);
  //SDL_EventState(SDL_QUIT,SDL_IGNORE);
  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);

  //Check there are actually joysticks attached
  if (!SDL_NumJoysticks()) {
    fprintf(stderr, "Error: No joysticks attached!\n");
    SDL_Quit();
    return (1);
  }

  /* test device_index, else look for a suitable device */
  if (init_sdl_device(device_index) != 0) {
    printf("Failed to open joystick at SDL device index %d, attempting to find a suitable joystick...\n", device_index);
    for (cnt = 0; cnt < STICK_INPUT_DEV_MAX; cnt++) {
      if (init_sdl_device(cnt) == 0) {
        break;
      }
    }
    /* return 1 if no device found.  BUGFIX: check failure before the
     * success message -- the original printed "Found an alternative
     * device!" even when the scan came up empty. */
    if (cnt == STICK_INPUT_DEV_MAX) {
      fprintf(stderr, "ERROR: no suitable joystick found [%s:%d]\n", __FILE__, __LINE__);
      SDL_Quit();
      return (1);
    }
    printf("Found an alternative device!\n");
  }

  return 0;
}

/* Smaller of two ints. */
int min(int x, int y)
{
  return (x > y) ? y : x;
}
gpl-2.0
mapfau/xbmc
xbmc/pictures/GUIViewStatePictures.cpp
8
3523
/*
 *      Copyright (C) 2005-2013 Team XBMC
 *      http://kodi.tv
 *
 *  This Program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This Program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with XBMC; see the file COPYING.  If not, see
 *  <http://www.gnu.org/licenses/>.
 *
 */

#include "GUIViewStatePictures.h"
#include "FileItem.h"
#include "ServiceBroker.h"
#include "view/ViewState.h"
#include "settings/AdvancedSettings.h"
#include "settings/MediaSourceSettings.h"
#include "settings/Settings.h"
#include "filesystem/Directory.h"
#include "guilib/LocalizeStrings.h"
#include "guilib/WindowIDs.h"
#include "view/ViewStateSettings.h"
#include "utils/FileExtensionProvider.h"

using namespace XFILE;
using namespace ADDON;

// Register the sort methods available in the pictures window (a reduced set
// at the virtual root, the full set inside a directory) and restore the
// previously saved view state for the current path.
CGUIViewStateWindowPictures::CGUIViewStateWindowPictures(const CFileItemList& items) : CGUIViewState(items)
{
  if (items.IsVirtualDirectoryRoot())
  {
    AddSortMethod(SortByLabel, 551, LABEL_MASKS());
    AddSortMethod(SortByDriveType, 564, LABEL_MASKS());
    SetSortMethod(SortByLabel);
    SetViewAsControl(DEFAULT_VIEW_LIST);

    SetSortOrder(SortOrderAscending);
  }
  else
  {
    AddSortMethod(SortByLabel, 551, LABEL_MASKS("%L", "%I", "%L", ""));  // Filename, Size | Foldername, empty
    AddSortMethod(SortBySize, 553, LABEL_MASKS("%L", "%I", "%L", "%I"));  // Filename, Size | Foldername, Size
    AddSortMethod(SortByDate, 552, LABEL_MASKS("%L", "%J", "%L", "%J"));  // Filename, Date | Foldername, Date
    AddSortMethod(SortByDateTaken, 577, LABEL_MASKS("%L", "%t", "%L", "%J"));  // Filename, DateTaken | Foldername, Date
    AddSortMethod(SortByFile, 561, LABEL_MASKS("%L", "%I", "%L", ""));  // Filename, Size | FolderName, empty

    // Non-root directories use whatever the user last selected.
    const CViewState *viewState = CViewStateSettings::GetInstance().Get("pictures");
    SetSortMethod(viewState->m_sortDescription);
    SetViewAsControl(viewState->m_viewMode);
    SetSortOrder(viewState->m_sortDescription.sortOrder);
  }
  LoadViewState(items.GetPath(), WINDOW_PICTURES);
}

// Persist the current sort/view selection for the pictures window.
void CGUIViewStateWindowPictures::SaveViewState()
{
  SaveViewToDb(m_items.GetPath(), WINDOW_PICTURES, CViewStateSettings::GetInstance().Get("pictures"));
}

// Parental-lock category used by this window.
std::string CGUIViewStateWindowPictures::GetLockType()
{
  return "pictures";
}

// Picture file extensions, plus video extensions when the
// "show videos in picture libraries" setting is enabled.
std::string CGUIViewStateWindowPictures::GetExtensions()
{
  std::string extensions = CServiceBroker::GetFileExtensionProvider().GetPictureExtensions();
  if (CServiceBroker::GetSettings().GetBool(CSettings::SETTING_PICTURES_SHOWVIDEOS))
    extensions += "|" + CServiceBroker::GetFileExtensionProvider().GetVideoExtensions();

  return extensions;
}

// Sources shown in the pictures window: configured picture sources,
// augmented with picture add-ons and the global source list.
VECSOURCES& CGUIViewStateWindowPictures::GetSources()
{
  VECSOURCES *pictureSources = CMediaSourceSettings::GetInstance().GetSources("pictures");

  // Guard against source type not existing
  if (pictureSources == nullptr)
  {
    static VECSOURCES empty;
    return empty;
  }

  // Picture add-ons
  AddAddonsSource("image", g_localizeStrings.Get(1039), "DefaultAddonPicture.png");

  // Global sources
  AddOrReplace(*pictureSources, CGUIViewState::GetSources());

  return *pictureSources;
}
gpl-2.0
mageec/mageec-gcc
gcc/config/sol2.c
8
9231
/* General Solaris system support.
   Copyright (C) 2004-2014 Free Software Foundation, Inc.
   Contributed by CodeSourcery, LLC.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "stringpool.h"
#include "varasm.h"
#include "output.h"
#include "tm.h"
#include "rtl.h"
#include "target.h"
#include "tm_p.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "hash-table.h"

/* Lists of (name . value) pairs queued by #pragma align / init / fini,
   consumed and unlinked as the matching decls are seen.  */
tree solaris_pending_aligns, solaris_pending_inits, solaris_pending_finis;

/* Attach any pending attributes for DECL to the list in *ATTRIBUTES.
   Pending attributes come from #pragma or _Pragma, so this code is
   only useful in the C family front ends, but it is included in all
   languages to avoid changing the target machine initializer
   depending on the language.  */

void
solaris_insert_attributes (tree decl, tree *attributes)
{
  tree *x, next;

  /* #pragma align: add an "aligned" attribute to the matching VAR_DECL
     unless one is already present, then unlink the pending entry.  */
  if (solaris_pending_aligns != NULL && TREE_CODE (decl) == VAR_DECL)
    for (x = &solaris_pending_aligns; *x; x = &TREE_CHAIN (*x))
      {
	tree name = TREE_PURPOSE (*x);
	tree value = TREE_VALUE (*x);
	if (DECL_NAME (decl) == name)
	  {
	    if (lookup_attribute ("aligned", DECL_ATTRIBUTES (decl))
		|| lookup_attribute ("aligned", *attributes))
	      warning (0, "ignoring %<#pragma align%> for explicitly "
		       "aligned %q+D", decl);
	    else
	      *attributes = tree_cons (get_identifier ("aligned"), value,
				       *attributes);
	    next = TREE_CHAIN (*x);
	    ggc_free (*x);
	    *x = next;
	    break;
	  }
      }

  /* #pragma init: tag the function and keep it alive for the initializer
     section even if it appears unused.  */
  if (solaris_pending_inits != NULL && TREE_CODE (decl) == FUNCTION_DECL)
    for (x = &solaris_pending_inits; *x; x = &TREE_CHAIN (*x))
      {
	tree name = TREE_PURPOSE (*x);
	if (DECL_NAME (decl) == name)
	  {
	    *attributes = tree_cons (get_identifier ("init"), NULL,
				     *attributes);
	    TREE_USED (decl) = 1;
	    DECL_PRESERVE_P (decl) = 1;
	    next = TREE_CHAIN (*x);
	    ggc_free (*x);
	    *x = next;
	    break;
	  }
      }

  /* #pragma fini: same treatment for finalizers.  */
  if (solaris_pending_finis != NULL && TREE_CODE (decl) == FUNCTION_DECL)
    for (x = &solaris_pending_finis; *x; x = &TREE_CHAIN (*x))
      {
	tree name = TREE_PURPOSE (*x);
	if (DECL_NAME (decl) == name)
	  {
	    *attributes = tree_cons (get_identifier ("fini"), NULL,
				     *attributes);
	    TREE_USED (decl) = 1;
	    DECL_PRESERVE_P (decl) = 1;
	    next = TREE_CHAIN (*x);
	    ggc_free (*x);
	    *x = next;
	    break;
	  }
      }
}

/* Output initializer or finalizer entries for DECL to FILE.  */

void
solaris_output_init_fini (FILE *file, tree decl)
{
  if (lookup_attribute ("init", DECL_ATTRIBUTES (decl)))
    {
      fprintf (file, "\t.pushsection\t" SECTION_NAME_FORMAT "\n", ".init");
      ASM_OUTPUT_CALL (file, decl);
      fprintf (file, "\t.popsection\n");
    }

  if (lookup_attribute ("fini", DECL_ATTRIBUTES (decl)))
    {
      fprintf (file, "\t.pushsection\t" SECTION_NAME_FORMAT "\n", ".fini");
      ASM_OUTPUT_CALL (file, decl);
      fprintf (file, "\t.popsection\n");
    }
}

/* Emit an assembler directive to set symbol for DECL visibility to
   the visibility type VIS, which must not be VISIBILITY_DEFAULT.  */

void
solaris_assemble_visibility (tree decl, int vis ATTRIBUTE_UNUSED)
{
#ifdef HAVE_GAS_HIDDEN
  /* Sun as uses .symbolic for STV_PROTECTED.  STV_INTERNAL is marked as
     `currently reserved', but the linker treats it like STV_HIDDEN.  Sun
     Studio 12.1 cc emits .hidden instead.

     There are 3 Sun extensions GCC doesn't yet know about: STV_EXPORTED,
     STV_SINGLETON, and STV_ELIMINATE.

     See Linker and Libraries Guide, Ch. 2, Link-Editor, Defining
     Additional Symbols, and Ch. 7, Object-File Format, Symbol Table
     Section.  */
  static const char * const visibility_types[] = {
    NULL, "symbolic", "hidden", "hidden"
  };

  const char *name, *type;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  type = visibility_types[vis];

  fprintf (asm_out_file, "\t.%s\t", type);
  assemble_name (asm_out_file, name);
  fprintf (asm_out_file, "\n");
#else
  if (!DECL_ARTIFICIAL (decl))
    warning (OPT_Wattributes, "visibility attribute not supported "
	     "in this configuration; ignored");
#endif
}

/* Group section information entry stored in solaris_comdat_htab.  */

typedef struct comdat_entry
{
  const char *name;	/* fragmented section name */
  unsigned int flags;	/* section flags (SECTION_LINKONCE cleared) */
  tree decl;		/* decl or group-signature identifier */
  const char *sig;	/* group signature symbol name (hash key) */
} comdat_entry;

/* Helpers for maintaining solaris_comdat_htab.  */

struct comdat_entry_hasher : typed_noop_remove <comdat_entry>
{
  typedef comdat_entry value_type;
  typedef comdat_entry compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};

inline hashval_t
comdat_entry_hasher::hash (const value_type *entry)
{
  return htab_hash_string (entry->sig);
}

inline bool
comdat_entry_hasher::equal (const value_type *entry1,
			    const compare_type *entry2)
{
  return strcmp (entry1->sig, entry2->sig) == 0;
}

/* Hash table of group signature symbols.  */

static hash_table<comdat_entry_hasher> *solaris_comdat_htab;

/* Output assembly to switch to COMDAT group section NAME with attributes
   FLAGS and group signature symbol DECL, using Sun as syntax.  */

void
solaris_elf_asm_comdat_section (const char *name, unsigned int flags, tree decl)
{
  const char *signature;
  char *section;
  comdat_entry entry, **slot;

  if (TREE_CODE (decl) == IDENTIFIER_NODE)
    signature = IDENTIFIER_POINTER (decl);
  else
    signature = IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl));

  /* Sun as requires group sections to be fragmented, i.e. to have names
     of the form <section>%<fragment>.  Strictly speaking this is only
     necessary to support cc -xF, but is enforced globally in violation
     of the ELF gABI.  We keep the section names generated by GCC
     (generally of the form .text.<signature>) and append %<signature>
     to pacify as, despite the redundancy.  */
  section = concat (name, "%", signature, NULL);

  /* Clear SECTION_LINKONCE flag so targetm.asm_out.named_section only
     emits this as a regular section.  Emit section before .group
     directive since Sun as treats undeclared sections as @progbits,
     which conflicts with .bss* sections which are @nobits.  */
  targetm.asm_out.named_section (section, flags & ~SECTION_LINKONCE, decl);

  /* Sun as separates declaration of a group section and of the group
     itself, using the .group directive and the #comdat flag.  */
  fprintf (asm_out_file, "\t.group\t%s," SECTION_NAME_FORMAT ",#comdat\n",
	   signature, section);

  /* Unlike GNU as, group signature symbols need to be defined explicitly
     for Sun as.  With a few exceptions, this is already the case.  To
     identify the missing ones without changing the affected front ends,
     remember the signature symbols and emit those not marked
     TREE_SYMBOL_REFERENCED in solaris_file_end.  */
  if (!solaris_comdat_htab)
    solaris_comdat_htab = new hash_table<comdat_entry_hasher> (37);

  entry.sig = signature;
  slot = solaris_comdat_htab->find_slot (&entry, INSERT);

  if (*slot == NULL)
    {
      *slot = XCNEW (comdat_entry);
      /* Remember fragmented section name.  */
      (*slot)->name = section;
      /* Emit as regular section, .group declaration has already been done.  */
      (*slot)->flags = flags & ~SECTION_LINKONCE;
      (*slot)->decl = decl;
      (*slot)->sig = signature;
    }
}

/* Define unreferenced COMDAT group signature symbol corresponding to SLOT.  */

int
solaris_define_comdat_signature (comdat_entry **slot,
				 void *aux ATTRIBUTE_UNUSED)
{
  comdat_entry *entry = *slot;
  tree decl = entry->decl;

  if (TREE_CODE (decl) != IDENTIFIER_NODE)
    decl = DECL_COMDAT_GROUP (decl);

  if (!TREE_SYMBOL_REFERENCED (decl))
    {
      /* Switch to group section, otherwise Sun as complains
	 `Group Id symbol defined outside of group'.  */
      switch_to_section (get_section (entry->name, entry->flags, entry->decl));

      ASM_OUTPUT_LABEL (asm_out_file, entry->sig);
    }

  /* Continue with scan.  */
  return 1;
}

/* Emit unreferenced COMDAT group signature symbols for Sun as.  */

void
solaris_file_end (void)
{
  if (!solaris_comdat_htab)
    return;

  solaris_comdat_htab->traverse <void *, solaris_define_comdat_signature>
    (NULL);
}

void
solaris_override_options (void)
{
  /* Older versions of Solaris ld cannot handle CIE version 3 in .eh_frame.
     Don't emit DWARF3/4 unless specifically selected if so.  */
  if (!HAVE_LD_EH_FRAME_CIEV3 && !global_options_set.x_dwarf_version)
    dwarf_version = 2;
}
gpl-2.0
toastcfh/android_kernel_lge_d851
sound/soc/davinci/davinci-i2s.c
8
23357
/* * ALSA SoC I2S (McBSP) Audio Layer for TI DAVINCI processor * * Author: Vladimir Barinov, <vbarinov@embeddedalley.com> * Copyright: (C) 2007 MontaVista Software, Inc., <source@mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/clk.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include <mach/asp.h> #include "davinci-pcm.h" #include "davinci-i2s.h" /* */ #define DAVINCI_MCBSP_DRR_REG 0x00 #define DAVINCI_MCBSP_DXR_REG 0x04 #define DAVINCI_MCBSP_SPCR_REG 0x08 #define DAVINCI_MCBSP_RCR_REG 0x0c #define DAVINCI_MCBSP_XCR_REG 0x10 #define DAVINCI_MCBSP_SRGR_REG 0x14 #define DAVINCI_MCBSP_PCR_REG 0x24 #define DAVINCI_MCBSP_SPCR_RRST (1 << 0) #define DAVINCI_MCBSP_SPCR_RINTM(v) ((v) << 4) #define DAVINCI_MCBSP_SPCR_XRST (1 << 16) #define DAVINCI_MCBSP_SPCR_XINTM(v) ((v) << 20) #define DAVINCI_MCBSP_SPCR_GRST (1 << 22) #define DAVINCI_MCBSP_SPCR_FRST (1 << 23) #define DAVINCI_MCBSP_SPCR_FREE (1 << 25) #define DAVINCI_MCBSP_RCR_RWDLEN1(v) ((v) << 5) #define DAVINCI_MCBSP_RCR_RFRLEN1(v) ((v) << 8) #define DAVINCI_MCBSP_RCR_RDATDLY(v) ((v) << 16) #define DAVINCI_MCBSP_RCR_RFIG (1 << 18) #define DAVINCI_MCBSP_RCR_RWDLEN2(v) ((v) << 21) #define DAVINCI_MCBSP_RCR_RFRLEN2(v) ((v) << 24) #define DAVINCI_MCBSP_RCR_RPHASE BIT(31) #define DAVINCI_MCBSP_XCR_XWDLEN1(v) ((v) << 5) #define DAVINCI_MCBSP_XCR_XFRLEN1(v) ((v) << 8) #define DAVINCI_MCBSP_XCR_XDATDLY(v) ((v) << 16) #define DAVINCI_MCBSP_XCR_XFIG (1 << 18) #define DAVINCI_MCBSP_XCR_XWDLEN2(v) ((v) << 21) #define DAVINCI_MCBSP_XCR_XFRLEN2(v) ((v) << 24) #define DAVINCI_MCBSP_XCR_XPHASE BIT(31) #define DAVINCI_MCBSP_SRGR_FWID(v) ((v) << 
8) #define DAVINCI_MCBSP_SRGR_FPER(v) ((v) << 16) #define DAVINCI_MCBSP_SRGR_FSGM (1 << 28) #define DAVINCI_MCBSP_SRGR_CLKSM BIT(29) #define DAVINCI_MCBSP_PCR_CLKRP (1 << 0) #define DAVINCI_MCBSP_PCR_CLKXP (1 << 1) #define DAVINCI_MCBSP_PCR_FSRP (1 << 2) #define DAVINCI_MCBSP_PCR_FSXP (1 << 3) #define DAVINCI_MCBSP_PCR_SCLKME (1 << 7) #define DAVINCI_MCBSP_PCR_CLKRM (1 << 8) #define DAVINCI_MCBSP_PCR_CLKXM (1 << 9) #define DAVINCI_MCBSP_PCR_FSRM (1 << 10) #define DAVINCI_MCBSP_PCR_FSXM (1 << 11) enum { DAVINCI_MCBSP_WORD_8 = 0, DAVINCI_MCBSP_WORD_12, DAVINCI_MCBSP_WORD_16, DAVINCI_MCBSP_WORD_20, DAVINCI_MCBSP_WORD_24, DAVINCI_MCBSP_WORD_32, }; static const unsigned char data_type[SNDRV_PCM_FORMAT_S32_LE + 1] = { [SNDRV_PCM_FORMAT_S8] = 1, [SNDRV_PCM_FORMAT_S16_LE] = 2, [SNDRV_PCM_FORMAT_S32_LE] = 4, }; static const unsigned char asp_word_length[SNDRV_PCM_FORMAT_S32_LE + 1] = { [SNDRV_PCM_FORMAT_S8] = DAVINCI_MCBSP_WORD_8, [SNDRV_PCM_FORMAT_S16_LE] = DAVINCI_MCBSP_WORD_16, [SNDRV_PCM_FORMAT_S32_LE] = DAVINCI_MCBSP_WORD_32, }; static const unsigned char double_fmt[SNDRV_PCM_FORMAT_S32_LE + 1] = { [SNDRV_PCM_FORMAT_S8] = SNDRV_PCM_FORMAT_S16_LE, [SNDRV_PCM_FORMAT_S16_LE] = SNDRV_PCM_FORMAT_S32_LE, }; struct davinci_mcbsp_dev { struct device *dev; struct davinci_pcm_dma_params dma_params[2]; void __iomem *base; #define MOD_DSP_A 0 #define MOD_DSP_B 1 int mode; u32 pcr; struct clk *clk; /* */ unsigned enable_channel_combine:1; unsigned int fmt; int clk_div; int clk_input_pin; bool i2s_accurate_sck; }; static inline void davinci_mcbsp_write_reg(struct davinci_mcbsp_dev *dev, int reg, u32 val) { __raw_writel(val, dev->base + reg); } static inline u32 davinci_mcbsp_read_reg(struct davinci_mcbsp_dev *dev, int reg) { return __raw_readl(dev->base + reg); } static void toggle_clock(struct davinci_mcbsp_dev *dev, int playback) { u32 m = playback ? 
DAVINCI_MCBSP_PCR_CLKXP : DAVINCI_MCBSP_PCR_CLKRP; /* */ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr ^ m); davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr); } static void davinci_mcbsp_start(struct davinci_mcbsp_dev *dev, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_platform *platform = rtd->platform; int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); u32 spcr; u32 mask = playback ? DAVINCI_MCBSP_SPCR_XRST : DAVINCI_MCBSP_SPCR_RRST; spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); if (spcr & mask) { /* */ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr & ~mask); toggle_clock(dev, playback); } if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM)) { /* */ spcr |= DAVINCI_MCBSP_SPCR_GRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } if (playback) { /* */ /* */ if (platform->driver->ops->trigger) { int ret = platform->driver->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); if (ret < 0) printk(KERN_DEBUG "Playback DMA stop failed\n"); } /* */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr |= DAVINCI_MCBSP_SPCR_XRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); /* */ udelay(100); /* */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr &= ~DAVINCI_MCBSP_SPCR_XRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); toggle_clock(dev, playback); /* */ if (platform->driver->ops->trigger) { int ret = platform->driver->ops->trigger(substream, SNDRV_PCM_TRIGGER_START); if (ret < 0) printk(KERN_DEBUG "Playback DMA start failed\n"); } } /* */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr |= mask; if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM)) { /* */ spcr |= DAVINCI_MCBSP_SPCR_FRST; } davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } static void 
davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback) { u32 spcr; /* */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr &= ~(DAVINCI_MCBSP_SPCR_GRST | DAVINCI_MCBSP_SPCR_FRST); spcr &= playback ? ~DAVINCI_MCBSP_SPCR_XRST : ~DAVINCI_MCBSP_SPCR_RRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); toggle_clock(dev, playback); } #define DEFAULT_BITPERSAMPLE 16 static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai); unsigned int pcr; unsigned int srgr; bool inv_fs = false; /* */ srgr = DAVINCI_MCBSP_SRGR_FSGM | DAVINCI_MCBSP_SRGR_FPER(DEFAULT_BITPERSAMPLE * 2 - 1) | DAVINCI_MCBSP_SRGR_FWID(DEFAULT_BITPERSAMPLE - 1); dev->fmt = fmt; /* */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: /* */ pcr = DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM; break; case SND_SOC_DAIFMT_CBM_CFS: pcr = DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_FSXM; /* */ switch (dev->clk_input_pin) { case MCBSP_CLKS: pcr |= DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM; break; case MCBSP_CLKR: pcr |= DAVINCI_MCBSP_PCR_SCLKME; break; default: dev_err(dev->dev, "bad clk_input_pin\n"); return -EINVAL; } break; case SND_SOC_DAIFMT_CBM_CFM: /* */ pcr = 0; break; default: printk(KERN_ERR "%s:bad master\n", __func__); return -EINVAL; } /* */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: /* */ inv_fs = true; case SND_SOC_DAIFMT_DSP_A: dev->mode = MOD_DSP_A; break; case SND_SOC_DAIFMT_DSP_B: dev->mode = MOD_DSP_B; break; default: printk(KERN_ERR "%s:bad format\n", __func__); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: /* */ pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP); break; case SND_SOC_DAIFMT_IB_IF: /* */ pcr |= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP); break; case SND_SOC_DAIFMT_NB_IF: /* */ pcr |= 
(DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP | DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP); break; case SND_SOC_DAIFMT_IB_NF: /* */ break; default: return -EINVAL; } if (inv_fs == true) pcr ^= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP); davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr); dev->pcr = pcr; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, pcr); return 0; } static int davinci_i2s_dai_set_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai); if (div_id != DAVINCI_MCBSP_CLKGDV) return -ENODEV; dev->clk_div = div; return 0; } static int davinci_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); struct davinci_pcm_dma_params *dma_params = &dev->dma_params[substream->stream]; struct snd_interval *i = NULL; int mcbsp_word_length, master; unsigned int rcr, xcr, srgr, clk_div, freq, framesize; u32 spcr; snd_pcm_format_t fmt; unsigned element_cnt = 1; /* */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { spcr |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } else { spcr |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } master = dev->fmt & SND_SOC_DAIFMT_MASTER_MASK; fmt = params_format(params); mcbsp_word_length = asp_word_length[fmt]; switch (master) { case SND_SOC_DAIFMT_CBS_CFS: freq = clk_get_rate(dev->clk); srgr = DAVINCI_MCBSP_SRGR_FSGM | DAVINCI_MCBSP_SRGR_CLKSM; srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1); if (dev->i2s_accurate_sck) { clk_div = 256; do { framesize = (freq / (--clk_div)) / params->rate_num * params->rate_den; } while (((framesize < 33) || (framesize > 4095)) && (clk_div)); clk_div--; srgr |= 
DAVINCI_MCBSP_SRGR_FPER(framesize - 1); } else { /* */ clk_div = freq / (mcbsp_word_length * 16) / params->rate_num * params->rate_den; srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length * 16 - 1); } clk_div &= 0xFF; srgr |= clk_div; break; case SND_SOC_DAIFMT_CBM_CFS: srgr = DAVINCI_MCBSP_SRGR_FSGM; clk_div = dev->clk_div - 1; srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1); srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length * 16 - 1); clk_div &= 0xFF; srgr |= clk_div; break; case SND_SOC_DAIFMT_CBM_CFM: /* */ i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); srgr = DAVINCI_MCBSP_SRGR_FSGM; srgr |= DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1); pr_debug("%s - %d FWID set: re-read srgr = %X\n", __func__, __LINE__, snd_interval_value(i) - 1); i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_FRAME_BITS); srgr |= DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1); break; default: return -EINVAL; } davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr); rcr = DAVINCI_MCBSP_RCR_RFIG; xcr = DAVINCI_MCBSP_XCR_XFIG; if (dev->mode == MOD_DSP_B) { rcr |= DAVINCI_MCBSP_RCR_RDATDLY(0); xcr |= DAVINCI_MCBSP_XCR_XDATDLY(0); } else { rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1); xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1); } /* */ fmt = params_format(params); if ((fmt > SNDRV_PCM_FORMAT_S32_LE) || !data_type[fmt]) { printk(KERN_WARNING "davinci-i2s: unsupported PCM format\n"); return -EINVAL; } if (params_channels(params) == 2) { element_cnt = 2; if (double_fmt[fmt] && dev->enable_channel_combine) { element_cnt = 1; fmt = double_fmt[fmt]; } switch (master) { case SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_CBS_CFM: rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(0); xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(0); rcr |= DAVINCI_MCBSP_RCR_RPHASE; xcr |= DAVINCI_MCBSP_XCR_XPHASE; break; case SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_CBM_CFS: rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(element_cnt - 1); xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(element_cnt - 1); break; default: return -EINVAL; } } 
dma_params->acnt = dma_params->data_type = data_type[fmt]; dma_params->fifo_level = 0; mcbsp_word_length = asp_word_length[fmt]; switch (master) { case SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_CBS_CFM: rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(0); xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(0); break; case SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_CBM_CFS: rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(element_cnt - 1); xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(element_cnt - 1); break; default: return -EINVAL; } rcr |= DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) | DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length); xcr |= DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) | DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr); else davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr); pr_debug("%s - %d srgr=%X\n", __func__, __LINE__, srgr); pr_debug("%s - %d xcr=%X\n", __func__, __LINE__, xcr); pr_debug("%s - %d rcr=%X\n", __func__, __LINE__, rcr); return 0; } static int davinci_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); davinci_mcbsp_stop(dev, playback); return 0; } static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); int ret = 0; int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: davinci_mcbsp_start(dev, substream); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: davinci_mcbsp_stop(dev, playback); break; default: ret = -EINVAL; } return ret; } static int davinci_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct 
davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); snd_soc_dai_set_dma_data(dai, substream, dev->dma_params); return 0; } static void davinci_i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); davinci_mcbsp_stop(dev, playback); } #define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000 static const struct snd_soc_dai_ops davinci_i2s_dai_ops = { .startup = davinci_i2s_startup, .shutdown = davinci_i2s_shutdown, .prepare = davinci_i2s_prepare, .trigger = davinci_i2s_trigger, .hw_params = davinci_i2s_hw_params, .set_fmt = davinci_i2s_set_dai_fmt, .set_clkdiv = davinci_i2s_dai_set_clkdiv, }; static struct snd_soc_dai_driver davinci_i2s_dai = { .playback = { .channels_min = 2, .channels_max = 2, .rates = DAVINCI_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S16_LE,}, .capture = { .channels_min = 2, .channels_max = 2, .rates = DAVINCI_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S16_LE,}, .ops = &davinci_i2s_dai_ops, }; static int davinci_i2s_probe(struct platform_device *pdev) { struct snd_platform_data *pdata = pdev->dev.platform_data; struct davinci_mcbsp_dev *dev; struct resource *mem, *ioarea, *res; enum dma_event_q asp_chan_q = EVENTQ_0; enum dma_event_q ram_chan_q = EVENTQ_1; int ret; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no mem resource?\n"); return -ENODEV; } ioarea = devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), pdev->name); if (!ioarea) { dev_err(&pdev->dev, "McBSP region already claimed\n"); return -EBUSY; } dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_mcbsp_dev), GFP_KERNEL); if (!dev) return -ENOMEM; if (pdata) { dev->enable_channel_combine = pdata->enable_channel_combine; dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].sram_size = pdata->sram_size_playback; dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].sram_size = pdata->sram_size_capture; 
dev->clk_input_pin = pdata->clk_input_pin; dev->i2s_accurate_sck = pdata->i2s_accurate_sck; asp_chan_q = pdata->asp_chan_q; ram_chan_q = pdata->ram_chan_q; } dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].asp_chan_q = asp_chan_q; dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].ram_chan_q = ram_chan_q; dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].asp_chan_q = asp_chan_q; dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].ram_chan_q = ram_chan_q; dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return -ENODEV; clk_enable(dev->clk); dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!dev->base) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_release_clk; } dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG); dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DRR_REG); /* */ res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) { dev_err(&pdev->dev, "no DMA resource\n"); ret = -ENXIO; goto err_release_clk; } dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel = res->start; res = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!res) { dev_err(&pdev->dev, "no DMA resource\n"); ret = -ENXIO; goto err_release_clk; } dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start; dev->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, dev); ret = snd_soc_register_dai(&pdev->dev, &davinci_i2s_dai); if (ret != 0) goto err_release_clk; return 0; err_release_clk: clk_disable(dev->clk); clk_put(dev->clk); return ret; } static int davinci_i2s_remove(struct platform_device *pdev) { struct davinci_mcbsp_dev *dev = dev_get_drvdata(&pdev->dev); snd_soc_unregister_dai(&pdev->dev); clk_disable(dev->clk); clk_put(dev->clk); dev->clk = NULL; return 0; } static struct platform_driver davinci_mcbsp_driver = { .probe = davinci_i2s_probe, .remove = davinci_i2s_remove, .driver = { .name = "davinci-mcbsp", .owner = THIS_MODULE, }, }; 
module_platform_driver(davinci_mcbsp_driver); MODULE_AUTHOR("Vladimir Barinov"); MODULE_DESCRIPTION("TI DAVINCI I2S (McBSP) SoC Interface"); MODULE_LICENSE("GPL");
gpl-2.0
duythanhphan/glibc
nptl/sysdeps/unix/sysv/linux/sparc/sem_wait.c
8
3077
/* sem_wait -- wait on a semaphore. Generic futex-using version. Copyright (C) 2003-2013 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <errno.h> #include <sysdep.h> #include <lowlevellock.h> #include <internaltypes.h> #include <semaphore.h> #include <pthreadP.h> #include <shlib-compat.h> void attribute_hidden __sem_wait_cleanup (void *arg) { struct sparc_new_sem *isem = (struct sparc_new_sem *) arg; atomic_decrement (&isem->nwaiters); } /* This is in a seperate function in order to make sure gcc puts the call site into an exception region, and thus the cleanups get properly run. 
*/ static int __attribute__ ((noinline)) do_futex_wait (struct sparc_new_sem *isem) { int err, oldtype = __pthread_enable_asynccancel (); err = lll_futex_wait (&isem->value, 0, isem->private ^ FUTEX_PRIVATE_FLAG); __pthread_disable_asynccancel (oldtype); return err; } int __new_sem_wait (sem_t *sem) { struct sparc_new_sem *isem = (struct sparc_new_sem *) sem; int err; if (atomic_decrement_if_positive (&isem->value) > 0) return 0; atomic_increment (&isem->nwaiters); pthread_cleanup_push (__sem_wait_cleanup, isem); while (1) { err = do_futex_wait(isem); if (err != 0 && err != -EWOULDBLOCK) { __set_errno (-err); err = -1; break; } if (atomic_decrement_if_positive (&isem->value) > 0) { err = 0; break; } } pthread_cleanup_pop (0); atomic_decrement (&isem->nwaiters); return err; } versioned_symbol (libpthread, __new_sem_wait, sem_wait, GLIBC_2_1); #if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1) int attribute_compat_text_section __old_sem_wait (sem_t *sem) { struct sparc_old_sem *isem = (struct sparc_old_sem *) sem; int err; do { if (atomic_decrement_if_positive (&isem->value) > 0) return 0; /* Enable asynchronous cancellation. Required by the standard. */ int oldtype = __pthread_enable_asynccancel (); err = lll_futex_wait (&isem->value, 0, isem->private ^ FUTEX_PRIVATE_FLAG); /* Disable asynchronous cancellation. */ __pthread_disable_asynccancel (oldtype); } while (err == 0 || err == -EWOULDBLOCK); __set_errno (-err); return -1; } compat_symbol (libpthread, __old_sem_wait, sem_wait, GLIBC_2_0); #endif
gpl-2.0
Zerixx/TrinityCore
src/server/scripts/Outland/CoilfangReservoir/SerpentShrine/boss_fathomlord_karathress.cpp
8
23417
/* * Copyright (C) 2008-2018 TrinityCore <https://www.trinitycore.org/> * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Boss_Fathomlord_Karathress SD%Complete: 70 SDComment: Cyclone workaround SDCategory: Coilfang Resevoir, Serpent Shrine Cavern EndScriptData */ #include "ScriptMgr.h" #include "InstanceScript.h" #include "MotionMaster.h" #include "ObjectAccessor.h" #include "ScriptedEscortAI.h" #include "serpent_shrine.h" #include "TemporarySummon.h" enum FathomlordKarathress { SAY_AGGRO = 0, SAY_GAIN_BLESSING = 1, SAY_GAIN_ABILITY1 = 2, SAY_GAIN_ABILITY2 = 3, SAY_GAIN_ABILITY3 = 4, SAY_SLAY = 5, SAY_DEATH = 6, //Karathress spells SPELL_CATACLYSMIC_BOLT = 38441, SPELL_POWER_OF_SHARKKIS = 38455, SPELL_POWER_OF_TIDALVESS = 38452, SPELL_POWER_OF_CARIBDIS = 38451, SPELL_ENRAGE = 24318, SPELL_SEAR_NOVA = 38445, SPELL_BLESSING_OF_THE_TIDES = 38449, //Sharkkis spells SPELL_LEECHING_THROW = 29436, SPELL_THE_BEAST_WITHIN = 38373, SPELL_MULTISHOT = 38366, SPELL_SUMMON_FATHOM_LURKER = 38433, SPELL_SUMMON_FATHOM_SPOREBAT = 38431, SPELL_PET_ENRAGE = 19574, //Tidalvess spells SPELL_FROST_SHOCK = 38234, SPELL_SPITFIRE_TOTEM = 38236, SPELL_POISON_CLEANSING_TOTEM = 38306, // Spell obsolete SPELL_EARTHBIND_TOTEM = 38304, SPELL_EARTHBIND_TOTEM_EFFECT = 6474, SPELL_WINDFURY_WEAPON = 38184, //Caribdis Spells 
SPELL_WATER_BOLT_VOLLEY = 38335, SPELL_TIDAL_SURGE = 38358, SPELL_TIDAL_SURGE_FREEZE = 38357, SPELL_HEAL = 38330, SPELL_SUMMON_CYCLONE = 38337, SPELL_CYCLONE_CYCLONE = 29538, //Yells and Quotes SOUND_GAIN_BLESSING_OF_TIDES = 11278, SOUND_MISC = 11283, //Summoned Unit GUIDs CREATURE_CYCLONE = 22104, CREATURE_FATHOM_SPOREBAT = 22120, CREATURE_FATHOM_LURKER = 22119, CREATURE_SPITFIRE_TOTEM = 22091, CREATURE_EARTHBIND_TOTEM = 22486, CREATURE_POISON_CLEANSING_TOTEM = 22487, }; //entry and position for Seer Olum #define SEER_OLUM 22820 #define OLUM_X 446.78f #define OLUM_Y -542.76f #define OLUM_Z -7.54773f #define OLUM_O 0.401581f #define SAY_GAIN_BLESSING_OF_TIDES "Your overconfidence will be your undoing! Guards, lend me your strength!" #define SAY_MISC "Alana be'lendor!" //don't know what use this #define MAX_ADVISORS 3 //Fathom-Lord Karathress AI class boss_fathomlord_karathress : public CreatureScript { public: boss_fathomlord_karathress() : CreatureScript("boss_fathomlord_karathress") { } CreatureAI* GetAI(Creature* creature) const override { return GetSerpentshrineCavernAI<boss_fathomlord_karathressAI>(creature); } struct boss_fathomlord_karathressAI : public ScriptedAI { boss_fathomlord_karathressAI(Creature* creature) : ScriptedAI(creature) { Initialize(); instance = creature->GetInstanceScript(); } void Initialize() { CataclysmicBolt_Timer = 10000; Enrage_Timer = 600000; //10 minutes SearNova_Timer = 20000 + rand32() % 40000; // 20 - 60 seconds BlessingOfTides = false; } InstanceScript* instance; uint32 CataclysmicBolt_Timer; uint32 Enrage_Timer; uint32 SearNova_Timer; bool BlessingOfTides; ObjectGuid Advisors[MAX_ADVISORS]; void Reset() override { Initialize(); ObjectGuid RAdvisors[MAX_ADVISORS]; RAdvisors[0] = instance->GetGuidData(DATA_SHARKKIS); RAdvisors[1] = instance->GetGuidData(DATA_TIDALVESS); RAdvisors[2] = instance->GetGuidData(DATA_CARIBDIS); // Respawn of the 3 Advisors for (uint8 i = 0; i < MAX_ADVISORS; ++i) { if (!RAdvisors[i].IsEmpty()) { 
Creature* advisor = ObjectAccessor::GetCreature(*me, RAdvisors[i]); if (advisor && !advisor->IsAlive()) { advisor->Respawn(); advisor->AI()->EnterEvadeMode(); advisor->GetMotionMaster()->MoveTargetedHome(); } } } instance->SetData(DATA_KARATHRESSEVENT, NOT_STARTED); } void EventSharkkisDeath() { Talk(SAY_GAIN_ABILITY1); DoCast(me, SPELL_POWER_OF_SHARKKIS); } void EventTidalvessDeath() { Talk(SAY_GAIN_ABILITY2); DoCast(me, SPELL_POWER_OF_TIDALVESS); } void EventCaribdisDeath() { Talk(SAY_GAIN_ABILITY3); DoCast(me, SPELL_POWER_OF_CARIBDIS); } void GetAdvisors() { Advisors[0] = instance->GetGuidData(DATA_SHARKKIS); Advisors[1] = instance->GetGuidData(DATA_TIDALVESS); Advisors[2] = instance->GetGuidData(DATA_CARIBDIS); } void StartEvent(Unit* who) { GetAdvisors(); Talk(SAY_AGGRO); DoZoneInCombat(); instance->SetGuidData(DATA_KARATHRESSEVENT_STARTER, who->GetGUID()); instance->SetData(DATA_KARATHRESSEVENT, IN_PROGRESS); } void KilledUnit(Unit* /*victim*/) override { Talk(SAY_SLAY); } void JustDied(Unit* /*killer*/) override { Talk(SAY_DEATH); instance->SetData(DATA_FATHOMLORDKARATHRESSEVENT, DONE); //support for quest 10944 me->SummonCreature(SEER_OLUM, OLUM_X, OLUM_Y, OLUM_Z, OLUM_O, TEMPSUMMON_TIMED_DESPAWN, 3600000); } void EnterCombat(Unit* who) override { StartEvent(who); } void UpdateAI(uint32 diff) override { //Only if not incombat check if the event is started if (!me->IsInCombat() && instance->GetData(DATA_KARATHRESSEVENT)) { if (Unit* target = ObjectAccessor::GetUnit(*me, instance->GetGuidData(DATA_KARATHRESSEVENT_STARTER))) { AttackStart(target); GetAdvisors(); } } //Return since we have no target if (!UpdateVictim()) return; //someone evaded! 
if (!instance->GetData(DATA_KARATHRESSEVENT)) { EnterEvadeMode(); return; } //CataclysmicBolt_Timer if (CataclysmicBolt_Timer <= diff) { //select a random unit other than the main tank Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 1); //if there aren't other units, cast on the tank if (!target) target = me->GetVictim(); if (target) DoCast(target, SPELL_CATACLYSMIC_BOLT); CataclysmicBolt_Timer = 10000; } else CataclysmicBolt_Timer -= diff; //SearNova_Timer if (SearNova_Timer <= diff) { DoCastVictim(SPELL_SEAR_NOVA); SearNova_Timer = 20000 + rand32() % 40000; } else SearNova_Timer -= diff; //Enrage_Timer if (Enrage_Timer <= diff) { DoCast(me, SPELL_ENRAGE); Enrage_Timer = 90000; } else Enrage_Timer -= diff; //Blessing of Tides Trigger if (!HealthAbovePct(75) && !BlessingOfTides) { BlessingOfTides = true; bool continueTriggering = false; for (uint8 i = 0; i < MAX_ADVISORS; ++i) { if (!Advisors[i].IsEmpty()) { Creature* advisor = ObjectAccessor::GetCreature(*me, Advisors[i]); if (advisor && advisor->IsAlive()) { continueTriggering = true; break; } } } if (continueTriggering) { DoCast(me, SPELL_BLESSING_OF_THE_TIDES); me->Yell(SAY_GAIN_BLESSING_OF_TIDES, LANG_UNIVERSAL); DoPlaySoundToSet(me, SOUND_GAIN_BLESSING_OF_TIDES); } } DoMeleeAttackIfReady(); } }; }; //Fathom-Guard Sharkkis AI class boss_fathomguard_sharkkis : public CreatureScript { public: boss_fathomguard_sharkkis() : CreatureScript("boss_fathomguard_sharkkis") { } CreatureAI* GetAI(Creature* creature) const override { return GetSerpentshrineCavernAI<boss_fathomguard_sharkkisAI>(creature); } struct boss_fathomguard_sharkkisAI : public ScriptedAI { boss_fathomguard_sharkkisAI(Creature* creature) : ScriptedAI(creature) { Initialize(); instance = creature->GetInstanceScript(); } void Initialize() { LeechingThrow_Timer = 20000; TheBeastWithin_Timer = 30000; Multishot_Timer = 15000; Pet_Timer = 10000; pet = false; } InstanceScript* instance; uint32 LeechingThrow_Timer; uint32 TheBeastWithin_Timer; uint32 
Multishot_Timer; uint32 Pet_Timer; bool pet; ObjectGuid SummonedPet; void Reset() override { Initialize(); Creature* Pet = ObjectAccessor::GetCreature(*me, SummonedPet); if (Pet && Pet->IsAlive()) Pet->DealDamage(Pet, Pet->GetHealth(), NULL, DIRECT_DAMAGE, SPELL_SCHOOL_MASK_NORMAL, NULL, false); SummonedPet.Clear(); instance->SetData(DATA_KARATHRESSEVENT, NOT_STARTED); } void JustDied(Unit* /*killer*/) override { if (Creature* Karathress = ObjectAccessor::GetCreature(*me, instance->GetGuidData(DATA_KARATHRESS))) ENSURE_AI(boss_fathomlord_karathress::boss_fathomlord_karathressAI, Karathress->AI())->EventSharkkisDeath(); } void EnterCombat(Unit* who) override { instance->SetGuidData(DATA_KARATHRESSEVENT_STARTER, who->GetGUID()); instance->SetData(DATA_KARATHRESSEVENT, IN_PROGRESS); } void UpdateAI(uint32 diff) override { //Only if not incombat check if the event is started if (!me->IsInCombat() && instance->GetData(DATA_KARATHRESSEVENT)) { if (Unit* target = ObjectAccessor::GetUnit(*me, instance->GetGuidData(DATA_KARATHRESSEVENT_STARTER))) AttackStart(target); } //Return since we have no target if (!UpdateVictim()) return; //someone evaded! 
if (!instance->GetData(DATA_KARATHRESSEVENT)) { EnterEvadeMode(); return; } //LeechingThrow_Timer if (LeechingThrow_Timer <= diff) { DoCastVictim(SPELL_LEECHING_THROW); LeechingThrow_Timer = 20000; } else LeechingThrow_Timer -= diff; //Multishot_Timer if (Multishot_Timer <= diff) { DoCastVictim(SPELL_MULTISHOT); Multishot_Timer = 20000; } else Multishot_Timer -= diff; //TheBeastWithin_Timer if (TheBeastWithin_Timer <= diff) { DoCast(me, SPELL_THE_BEAST_WITHIN); Creature* Pet = ObjectAccessor::GetCreature(*me, SummonedPet); if (Pet && Pet->IsAlive()) Pet->CastSpell(Pet, SPELL_PET_ENRAGE, true); TheBeastWithin_Timer = 30000; } else TheBeastWithin_Timer -= diff; //Pet_Timer if (Pet_Timer < diff && pet == false) { pet = true; //uint32 spell_id; uint32 pet_id; if (!urand(0, 1)) { //spell_id = SPELL_SUMMON_FATHOM_LURKER; pet_id = CREATURE_FATHOM_LURKER; } else { //spell_id = SPELL_SUMMON_FATHOM_SPOREBAT; pet_id = CREATURE_FATHOM_SPOREBAT; } //DoCast(me, spell_id, true); if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) { if (Creature* Pet = DoSpawnCreature(pet_id, 0, 0, 0, 0, TEMPSUMMON_TIMED_DESPAWN_OUT_OF_COMBAT, 15000)) { Pet->AI()->AttackStart(target); SummonedPet = Pet->GetGUID(); } } } else Pet_Timer -= diff; DoMeleeAttackIfReady(); } }; }; //Fathom-Guard Tidalvess AI class boss_fathomguard_tidalvess : public CreatureScript { public: boss_fathomguard_tidalvess() : CreatureScript("boss_fathomguard_tidalvess") { } CreatureAI* GetAI(Creature* creature) const override { return GetSerpentshrineCavernAI<boss_fathomguard_tidalvessAI>(creature); } struct boss_fathomguard_tidalvessAI : public ScriptedAI { boss_fathomguard_tidalvessAI(Creature* creature) : ScriptedAI(creature) { Initialize(); instance = creature->GetInstanceScript(); } void Initialize() { FrostShock_Timer = 25000; Spitfire_Timer = 60000; PoisonCleansing_Timer = 30000; Earthbind_Timer = 45000; } InstanceScript* instance; uint32 FrostShock_Timer; uint32 Spitfire_Timer; uint32 PoisonCleansing_Timer; 
uint32 Earthbind_Timer; void Reset() override { Initialize(); instance->SetData(DATA_KARATHRESSEVENT, NOT_STARTED); } void JustDied(Unit* /*killer*/) override { if (Creature* Karathress = ObjectAccessor::GetCreature(*me, instance->GetGuidData(DATA_KARATHRESS))) ENSURE_AI(boss_fathomlord_karathress::boss_fathomlord_karathressAI, Karathress->AI())->EventTidalvessDeath(); } void EnterCombat(Unit* who) override { instance->SetGuidData(DATA_KARATHRESSEVENT_STARTER, who->GetGUID()); instance->SetData(DATA_KARATHRESSEVENT, IN_PROGRESS); DoCast(me, SPELL_WINDFURY_WEAPON); } void UpdateAI(uint32 diff) override { //Only if not incombat check if the event is started if (!me->IsInCombat() && instance->GetData(DATA_KARATHRESSEVENT)) { if (Unit* target = ObjectAccessor::GetUnit(*me, instance->GetGuidData(DATA_KARATHRESSEVENT_STARTER))) AttackStart(target); } //Return since we have no target if (!UpdateVictim()) return; //someone evaded! if (!instance->GetData(DATA_KARATHRESSEVENT)) { EnterEvadeMode(); return; } if (!me->HasAura(SPELL_WINDFURY_WEAPON)) { DoCast(me, SPELL_WINDFURY_WEAPON); } //FrostShock_Timer if (FrostShock_Timer <= diff) { DoCastVictim(SPELL_FROST_SHOCK); FrostShock_Timer = 25000 + rand32() % 5000; } else FrostShock_Timer -= diff; //Spitfire_Timer if (Spitfire_Timer <= diff) { DoCast(me, SPELL_SPITFIRE_TOTEM); if (Unit* SpitfireTotem = me->FindNearestCreature(CREATURE_SPITFIRE_TOTEM, 100.0f)) SpitfireTotem->ToCreature()->AI()->AttackStart(me->GetVictim()); Spitfire_Timer = 60000; } else Spitfire_Timer -= diff; //PoisonCleansing_Timer if (PoisonCleansing_Timer <= diff) { DoCast(me, SPELL_POISON_CLEANSING_TOTEM); PoisonCleansing_Timer = 30000; } else PoisonCleansing_Timer -= diff; //Earthbind_Timer if (Earthbind_Timer <= diff) { DoCast(me, SPELL_EARTHBIND_TOTEM); Earthbind_Timer = 45000; } else Earthbind_Timer -= diff; DoMeleeAttackIfReady(); } }; }; //Fathom-Guard Caribdis AI class boss_fathomguard_caribdis : public CreatureScript { public: 
boss_fathomguard_caribdis() : CreatureScript("boss_fathomguard_caribdis") { } CreatureAI* GetAI(Creature* creature) const override { return GetSerpentshrineCavernAI<boss_fathomguard_caribdisAI>(creature); } struct boss_fathomguard_caribdisAI : public ScriptedAI { boss_fathomguard_caribdisAI(Creature* creature) : ScriptedAI(creature) { Initialize(); instance = creature->GetInstanceScript(); } void Initialize() { WaterBoltVolley_Timer = 35000; TidalSurge_Timer = 15000 + rand32() % 5000; Heal_Timer = 55000; Cyclone_Timer = 30000 + rand32() % 10000; } InstanceScript* instance; uint32 WaterBoltVolley_Timer; uint32 TidalSurge_Timer; uint32 Heal_Timer; uint32 Cyclone_Timer; void Reset() override { Initialize(); instance->SetData(DATA_KARATHRESSEVENT, NOT_STARTED); } void JustDied(Unit* /*killer*/) override { if (Creature* Karathress = ObjectAccessor::GetCreature(*me, instance->GetGuidData(DATA_KARATHRESS))) ENSURE_AI(boss_fathomlord_karathress::boss_fathomlord_karathressAI, Karathress->AI())->EventCaribdisDeath(); } void EnterCombat(Unit* who) override { instance->SetGuidData(DATA_KARATHRESSEVENT_STARTER, who->GetGUID()); instance->SetData(DATA_KARATHRESSEVENT, IN_PROGRESS); } void UpdateAI(uint32 diff) override { //Only if not incombat check if the event is started if (!me->IsInCombat() && instance->GetData(DATA_KARATHRESSEVENT)) { if (Unit* target = ObjectAccessor::GetUnit(*me, instance->GetGuidData(DATA_KARATHRESSEVENT_STARTER))) AttackStart(target); } //Return since we have no target if (!UpdateVictim()) return; //someone evaded! 
if (!instance->GetData(DATA_KARATHRESSEVENT)) { EnterEvadeMode(); return; } //WaterBoltVolley_Timer if (WaterBoltVolley_Timer <= diff) { DoCastVictim(SPELL_WATER_BOLT_VOLLEY); WaterBoltVolley_Timer = 30000; } else WaterBoltVolley_Timer -= diff; //TidalSurge_Timer if (TidalSurge_Timer <= diff) { DoCastVictim(SPELL_TIDAL_SURGE); // Hacky way to do it - won't trigger elseways if (me->GetVictim()) me->EnsureVictim()->CastSpell(me->GetVictim(), SPELL_TIDAL_SURGE_FREEZE, true); TidalSurge_Timer = 15000 + rand32() % 5000; } else TidalSurge_Timer -= diff; //Cyclone_Timer if (Cyclone_Timer <= diff) { //DoCast(me, SPELL_SUMMON_CYCLONE); // Doesn't work Cyclone_Timer = 30000 + rand32() % 10000; if (Creature* Cyclone = me->SummonCreature(CREATURE_CYCLONE, me->GetPositionX(), me->GetPositionY(), me->GetPositionZ(), float(rand32() % 5), TEMPSUMMON_TIMED_DESPAWN, 15000)) { Cyclone->SetObjectScale(3.0f); Cyclone->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NOT_SELECTABLE); Cyclone->setFaction(me->getFaction()); Cyclone->CastSpell(Cyclone, SPELL_CYCLONE_CYCLONE, true); if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) Cyclone->AI()->AttackStart(target); } } else Cyclone_Timer -= diff; //Heal_Timer if (Heal_Timer <= diff) { // It can be cast on any of the mobs Unit* unit = NULL; while (unit == NULL || !unit->IsAlive()) unit = selectAdvisorUnit(); if (unit && unit->IsAlive()) DoCast(unit, SPELL_HEAL); Heal_Timer = 60000; } else Heal_Timer -= diff; DoMeleeAttackIfReady(); } Unit* selectAdvisorUnit() { Unit* unit = NULL; switch (rand32() % 4) { case 0: unit = ObjectAccessor::GetUnit(*me, instance->GetGuidData(DATA_KARATHRESS)); break; case 1: unit = ObjectAccessor::GetUnit(*me, instance->GetGuidData(DATA_SHARKKIS)); break; case 2: unit = ObjectAccessor::GetUnit(*me, instance->GetGuidData(DATA_TIDALVESS)); break; case 3: unit = me; break; } return unit; } }; }; void AddSC_boss_fathomlord_karathress() { new boss_fathomlord_karathress(); new boss_fathomguard_sharkkis(); new 
boss_fathomguard_tidalvess(); new boss_fathomguard_caribdis(); }
gpl-2.0
joeduong/bideas-openrex-linux-3.14
drivers/video/backlight/ep93xx_bl.c
520
3517
/* * Driver for the Cirrus EP93xx lcd backlight * * Copyright (c) 2010 H Hartley Sweeten <hsweeten@visionengravers.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This driver controls the pulse width modulated brightness control output, * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/fb.h> #include <linux/backlight.h> #define EP93XX_MAX_COUNT 255 #define EP93XX_MAX_BRIGHT 255 #define EP93XX_DEF_BRIGHT 128 struct ep93xxbl { void __iomem *mmio; int brightness; }; static int ep93xxbl_set(struct backlight_device *bl, int brightness) { struct ep93xxbl *ep93xxbl = bl_get_data(bl); writel((brightness << 8) | EP93XX_MAX_COUNT, ep93xxbl->mmio); ep93xxbl->brightness = brightness; return 0; } static int ep93xxbl_update_status(struct backlight_device *bl) { int brightness = bl->props.brightness; if (bl->props.power != FB_BLANK_UNBLANK || bl->props.fb_blank != FB_BLANK_UNBLANK) brightness = 0; return ep93xxbl_set(bl, brightness); } static int ep93xxbl_get_brightness(struct backlight_device *bl) { struct ep93xxbl *ep93xxbl = bl_get_data(bl); return ep93xxbl->brightness; } static const struct backlight_ops ep93xxbl_ops = { .update_status = ep93xxbl_update_status, .get_brightness = ep93xxbl_get_brightness, }; static int ep93xxbl_probe(struct platform_device *dev) { struct ep93xxbl *ep93xxbl; struct backlight_device *bl; struct backlight_properties props; struct resource *res; ep93xxbl = devm_kzalloc(&dev->dev, sizeof(*ep93xxbl), GFP_KERNEL); if (!ep93xxbl) return -ENOMEM; res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; /* * FIXME - We don't do a request_mem_region here because we are * sharing the register space with the framebuffer driver (see * drivers/video/ep93xx-fb.c) and doing so will 
cause the second * loaded driver to return -EBUSY. * * NOTE: No locking is required; the framebuffer does not touch * this register. */ ep93xxbl->mmio = devm_ioremap(&dev->dev, res->start, resource_size(res)); if (!ep93xxbl->mmio) return -ENXIO; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = EP93XX_MAX_BRIGHT; bl = devm_backlight_device_register(&dev->dev, dev->name, &dev->dev, ep93xxbl, &ep93xxbl_ops, &props); if (IS_ERR(bl)) return PTR_ERR(bl); bl->props.brightness = EP93XX_DEF_BRIGHT; platform_set_drvdata(dev, bl); ep93xxbl_update_status(bl); return 0; } #ifdef CONFIG_PM_SLEEP static int ep93xxbl_suspend(struct device *dev) { struct backlight_device *bl = dev_get_drvdata(dev); return ep93xxbl_set(bl, 0); } static int ep93xxbl_resume(struct device *dev) { struct backlight_device *bl = dev_get_drvdata(dev); backlight_update_status(bl); return 0; } #endif static SIMPLE_DEV_PM_OPS(ep93xxbl_pm_ops, ep93xxbl_suspend, ep93xxbl_resume); static struct platform_driver ep93xxbl_driver = { .driver = { .name = "ep93xx-bl", .owner = THIS_MODULE, .pm = &ep93xxbl_pm_ops, }, .probe = ep93xxbl_probe, }; module_platform_driver(ep93xxbl_driver); MODULE_DESCRIPTION("EP93xx Backlight Driver"); MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ep93xx-bl");
gpl-2.0
AD5GB/android_kernel_samsung_msm8660-common
drivers/gpu/drm/i915/i915_dma.c
1288
59217
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- */ /* * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "drmP.h" #include "drm.h" #include "drm_crtc_helper.h" #include "drm_fb_helper.h" #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" #include "i915_trace.h" #include "../../../platform/x86/intel_ips.h" #include <linux/pci.h> #include <linux/vgaarb.h> #include <linux/acpi.h> #include <linux/pnp.h> #include <linux/vga_switcheroo.h> #include <linux/slab.h> #include <acpi/video.h> static void i915_write_hws_pga(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 addr; addr = dev_priv->status_page_dmah->busaddr; if (INTEL_INFO(dev)->gen >= 4) addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; I915_WRITE(HWS_PGA, addr); } /** * Sets up the hardware status page for devices that need a physical address * in the register. */ static int i915_init_phys_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; /* Program Hardware Status Page */ dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) { DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr, 0, PAGE_SIZE); i915_write_hws_pga(dev); DRM_DEBUG_DRIVER("Enabled hardware status page\n"); return 0; } /** * Frees the hardware status page, whether it's a physical address or a virtual * address set up by the X Server. 
*/ static void i915_free_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); if (dev_priv->status_page_dmah) { drm_pci_free(dev, dev_priv->status_page_dmah); dev_priv->status_page_dmah = NULL; } if (ring->status_page.gfx_addr) { ring->status_page.gfx_addr = 0; drm_core_ioremapfree(&dev_priv->hws_map, dev); } /* Need to rewrite hardware status page */ I915_WRITE(HWS_PGA, 0x1ffff000); } void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; struct intel_ring_buffer *ring = LP_RING(dev_priv); /* * We should never lose context on the ring with modesetting * as we don't expose it to userspace */ if (drm_core_check_feature(dev, DRIVER_MODESET)) return; ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; if (!dev->primary->master) return; master_priv = dev->primary->master->driver_priv; if (ring->head == ring->tail && master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; } static int i915_dma_cleanup(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; int i; /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. 
*/ if (dev->irq_enabled) drm_irq_uninstall(dev); mutex_lock(&dev->struct_mutex); for (i = 0; i < I915_NUM_RINGS; i++) intel_cleanup_ring_buffer(&dev_priv->ring[i]); mutex_unlock(&dev->struct_mutex); /* Clear the HWS virtual address at teardown */ if (I915_NEED_GFX_HWS(dev)) i915_free_hws(dev); return 0; } static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret; master_priv->sarea = drm_getsarea(dev); if (master_priv->sarea) { master_priv->sarea_priv = (drm_i915_sarea_t *) ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); } else { DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n"); } if (init->ring_size != 0) { if (LP_RING(dev_priv)->obj != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); return -EINVAL; } ret = intel_render_ring_init_dri(dev, init->ring_start, init->ring_size); if (ret) { i915_dma_cleanup(dev); return ret; } } dev_priv->cpp = init->cpp; dev_priv->back_offset = init->back_offset; dev_priv->front_offset = init->front_offset; dev_priv->current_page = 0; if (master_priv->sarea_priv) master_priv->sarea_priv->pf_current_page = 0; /* Allow hardware batchbuffers unless told otherwise. 
*/ dev_priv->allow_batchbuffer = 1; return 0; } static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); DRM_DEBUG_DRIVER("%s\n", __func__); if (ring->map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); return -ENOMEM; } /* Program Hardware Status Page */ if (!ring->status_page.page_addr) { DRM_ERROR("Can not find hardware status page\n"); return -EINVAL; } DRM_DEBUG_DRIVER("hw status page @ %p\n", ring->status_page.page_addr); if (ring->status_page.gfx_addr != 0) intel_ring_setup_status_page(ring); else i915_write_hws_pga(dev); DRM_DEBUG_DRIVER("Enabled hardware status page\n"); return 0; } static int i915_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_init_t *init = data; int retcode = 0; switch (init->func) { case I915_INIT_DMA: retcode = i915_initialize(dev, init); break; case I915_CLEANUP_DMA: retcode = i915_dma_cleanup(dev); break; case I915_RESUME_DMA: retcode = i915_dma_resume(dev); break; default: retcode = -EINVAL; break; } return retcode; } /* Implement basically the same security restrictions as hardware does * for MI_BATCH_NON_SECURE. These can be made stricter at any time. * * Most of the calculations below involve calculating the size of a * particular instruction. It's important to get the size right as * that tells us where the next instruction to check is. Any illegal * instruction detected will be given a size of zero, which is a * signal to abort the rest of the buffer. 
*/ static int validate_cmd(int cmd) { switch (((cmd >> 29) & 0x7)) { case 0x0: switch ((cmd >> 23) & 0x3f) { case 0x0: return 1; /* MI_NOOP */ case 0x4: return 1; /* MI_FLUSH */ default: return 0; /* disallow everything else */ } break; case 0x1: return 0; /* reserved */ case 0x2: return (cmd & 0xff) + 2; /* 2d commands */ case 0x3: if (((cmd >> 24) & 0x1f) <= 0x18) return 1; switch ((cmd >> 24) & 0x1f) { case 0x1c: return 1; case 0x1d: switch ((cmd >> 16) & 0xff) { case 0x3: return (cmd & 0x1f) + 2; case 0x4: return (cmd & 0xf) + 2; default: return (cmd & 0xffff) + 2; } case 0x1e: if (cmd & (1 << 23)) return (cmd & 0xffff) + 1; else return 1; case 0x1f: if ((cmd & (1 << 23)) == 0) /* inline vertices */ return (cmd & 0x1ffff) + 2; else if (cmd & (1 << 17)) /* indirect random */ if ((cmd & 0xffff) == 0) return 0; /* unknown length, too hard */ else return (((cmd & 0xffff) + 1) / 2) + 1; else return 2; /* indirect sequential */ default: return 0; } default: return 0; } return 0; } static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; int i, ret; if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) return -EINVAL; for (i = 0; i < dwords;) { int sz = validate_cmd(buffer[i]); if (sz == 0 || i + sz > dwords) return -EINVAL; i += sz; } ret = BEGIN_LP_RING((dwords+1)&~1); if (ret) return ret; for (i = 0; i < dwords; i++) OUT_RING(buffer[i]); if (dwords & 1) OUT_RING(0); ADVANCE_LP_RING(); return 0; } int i915_emit_box(struct drm_device *dev, struct drm_clip_rect *box, int DR1, int DR4) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; if (box->y2 <= box->y1 || box->x2 <= box->x1 || box->y2 <= 0 || box->x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", box->x1, box->y1, box->x2, box->y2); return -EINVAL; } if (INTEL_INFO(dev)->gen >= 4) { ret = BEGIN_LP_RING(4); if (ret) return ret; OUT_RING(GFX_OP_DRAWRECT_INFO_I965); OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); 
OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); OUT_RING(DR4); } else { ret = BEGIN_LP_RING(6); if (ret) return ret; OUT_RING(GFX_OP_DRAWRECT_INFO); OUT_RING(DR1); OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); OUT_RING(DR4); OUT_RING(0); } ADVANCE_LP_RING(); return 0; } /* XXX: Emitting the counter should really be moved to part of the IRQ * emit. For now, do it in both places: */ static void i915_emit_breadcrumb(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; dev_priv->counter++; if (dev_priv->counter > 0x7FFFFFFFUL) dev_priv->counter = 0; if (master_priv->sarea_priv) master_priv->sarea_priv->last_enqueue = dev_priv->counter; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } } static int i915_dispatch_cmdbuffer(struct drm_device * dev, drm_i915_cmdbuffer_t *cmd, struct drm_clip_rect *cliprects, void *cmdbuf) { int nbox = cmd->num_cliprects; int i = 0, count, ret; if (cmd->sz & 0x3) { DRM_ERROR("alignment"); return -EINVAL; } i915_kernel_lost_context(dev); count = nbox ? nbox : 1; for (i = 0; i < count; i++) { if (i < nbox) { ret = i915_emit_box(dev, &cliprects[i], cmd->DR1, cmd->DR4); if (ret) return ret; } ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4); if (ret) return ret; } i915_emit_breadcrumb(dev); return 0; } static int i915_dispatch_batchbuffer(struct drm_device * dev, drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects) { struct drm_i915_private *dev_priv = dev->dev_private; int nbox = batch->num_cliprects; int i, count, ret; if ((batch->start | batch->used) & 0x7) { DRM_ERROR("alignment"); return -EINVAL; } i915_kernel_lost_context(dev); count = nbox ? 
nbox : 1; for (i = 0; i < count; i++) { if (i < nbox) { ret = i915_emit_box(dev, &cliprects[i], batch->DR1, batch->DR4); if (ret) return ret; } if (!IS_I830(dev) && !IS_845G(dev)) { ret = BEGIN_LP_RING(2); if (ret) return ret; if (INTEL_INFO(dev)->gen >= 4) { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); OUT_RING(batch->start); } else { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); OUT_RING(batch->start | MI_BATCH_NON_SECURE); } } else { ret = BEGIN_LP_RING(4); if (ret) return ret; OUT_RING(MI_BATCH_BUFFER); OUT_RING(batch->start | MI_BATCH_NON_SECURE); OUT_RING(batch->start + batch->used - 4); OUT_RING(0); } ADVANCE_LP_RING(); } if (IS_G4X(dev) || IS_GEN5(dev)) { if (BEGIN_LP_RING(2) == 0) { OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); OUT_RING(MI_NOOP); ADVANCE_LP_RING(); } } i915_emit_breadcrumb(dev); return 0; } static int i915_dispatch_flip(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret; if (!master_priv->sarea_priv) return -EINVAL; DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", __func__, dev_priv->current_page, master_priv->sarea_priv->pf_current_page); i915_kernel_lost_context(dev); ret = BEGIN_LP_RING(10); if (ret) return ret; OUT_RING(MI_FLUSH | MI_READ_FLUSH); OUT_RING(0); OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); OUT_RING(0); if (dev_priv->current_page == 0) { OUT_RING(dev_priv->back_offset); dev_priv->current_page = 1; } else { OUT_RING(dev_priv->front_offset); dev_priv->current_page = 0; } OUT_RING(0); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); OUT_RING(0); ADVANCE_LP_RING(); master_priv->sarea_priv->last_enqueue = dev_priv->counter++; if (BEGIN_LP_RING(4) == 0) { OUT_RING(MI_STORE_DWORD_INDEX); OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); } master_priv->sarea_priv->pf_current_page = 
dev_priv->current_page; return 0; } static int i915_quiescent(struct drm_device *dev) { struct intel_ring_buffer *ring = LP_RING(dev->dev_private); i915_kernel_lost_context(dev); return intel_wait_ring_idle(ring); } static int i915_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; RING_LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->struct_mutex); ret = i915_quiescent(dev); mutex_unlock(&dev->struct_mutex); return ret; } static int i915_batchbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; drm_i915_batchbuffer_t *batch = data; int ret; struct drm_clip_rect *cliprects = NULL; if (!dev_priv->allow_batchbuffer) { DRM_ERROR("Batchbuffer ioctl disabled\n"); return -EINVAL; } DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n", batch->start, batch->used, batch->num_cliprects); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); if (batch->num_cliprects < 0) return -EINVAL; if (batch->num_cliprects) { cliprects = kcalloc(batch->num_cliprects, sizeof(struct drm_clip_rect), GFP_KERNEL); if (cliprects == NULL) return -ENOMEM; ret = copy_from_user(cliprects, batch->cliprects, batch->num_cliprects * sizeof(struct drm_clip_rect)); if (ret != 0) { ret = -EFAULT; goto fail_free; } } mutex_lock(&dev->struct_mutex); ret = i915_dispatch_batchbuffer(dev, batch, cliprects); mutex_unlock(&dev->struct_mutex); if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); fail_free: kfree(cliprects); return ret; } static int i915_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; drm_i915_sarea_t 
*sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv; drm_i915_cmdbuffer_t *cmdbuf = data; struct drm_clip_rect *cliprects = NULL; void *batch_data; int ret; DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n", cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); if (cmdbuf->num_cliprects < 0) return -EINVAL; batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL); if (batch_data == NULL) return -ENOMEM; ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); if (ret != 0) { ret = -EFAULT; goto fail_batch_free; } if (cmdbuf->num_cliprects) { cliprects = kcalloc(cmdbuf->num_cliprects, sizeof(struct drm_clip_rect), GFP_KERNEL); if (cliprects == NULL) { ret = -ENOMEM; goto fail_batch_free; } ret = copy_from_user(cliprects, cmdbuf->cliprects, cmdbuf->num_cliprects * sizeof(struct drm_clip_rect)); if (ret != 0) { ret = -EFAULT; goto fail_clip_free; } } mutex_lock(&dev->struct_mutex); ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data); mutex_unlock(&dev->struct_mutex); if (ret) { DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); goto fail_clip_free; } if (sarea_priv) sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); fail_clip_free: kfree(cliprects); fail_batch_free: kfree(batch_data); return ret; } static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; DRM_DEBUG_DRIVER("%s\n", __func__); RING_LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->struct_mutex); ret = i915_dispatch_flip(dev); mutex_unlock(&dev->struct_mutex); return ret; } static int i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_getparam_t *param = data; int value; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } switch (param->param) { case I915_PARAM_IRQ_ACTIVE: value = dev->pdev->irq ? 
1 : 0; break; case I915_PARAM_ALLOW_BATCHBUFFER: value = dev_priv->allow_batchbuffer ? 1 : 0; break; case I915_PARAM_LAST_DISPATCH: value = READ_BREADCRUMB(dev_priv); break; case I915_PARAM_CHIPSET_ID: value = dev->pci_device; break; case I915_PARAM_HAS_GEM: value = dev_priv->has_gem; break; case I915_PARAM_NUM_FENCES_AVAIL: value = dev_priv->num_fence_regs - dev_priv->fence_reg_start; break; case I915_PARAM_HAS_OVERLAY: value = dev_priv->overlay ? 1 : 0; break; case I915_PARAM_HAS_PAGEFLIPPING: value = 1; break; case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */ value = dev_priv->has_gem; break; case I915_PARAM_HAS_BSD: value = HAS_BSD(dev); break; case I915_PARAM_HAS_BLT: value = HAS_BLT(dev); break; case I915_PARAM_HAS_RELAXED_FENCING: value = 1; break; case I915_PARAM_HAS_COHERENT_RINGS: value = 1; break; case I915_PARAM_HAS_EXEC_CONSTANTS: value = INTEL_INFO(dev)->gen >= 4; break; case I915_PARAM_HAS_RELAXED_DELTA: value = 1; break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); return -EINVAL; } if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { DRM_ERROR("DRM_COPY_TO_USER failed\n"); return -EFAULT; } return 0; } static int i915_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_setparam_t *param = data; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } switch (param->param) { case I915_SETPARAM_USE_MI_BATCHBUFFER_START: break; case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: dev_priv->tex_lru_log_granularity = param->value; break; case I915_SETPARAM_ALLOW_BATCHBUFFER: dev_priv->allow_batchbuffer = param->value; break; case I915_SETPARAM_NUM_USED_FENCES: if (param->value > dev_priv->num_fence_regs || param->value < 0) return -EINVAL; /* Userspace can use first N regs */ dev_priv->fence_reg_start = param->value; break; default: DRM_DEBUG_DRIVER("unknown parameter %d\n", param->param); return -EINVAL; } return 0; } static int 
i915_set_status_page(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_hws_addr_t *hws = data; struct intel_ring_buffer *ring = LP_RING(dev_priv); if (!I915_NEED_GFX_HWS(dev)) return -EINVAL; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } if (drm_core_check_feature(dev, DRIVER_MODESET)) { WARN(1, "tried to set status page when mode setting active\n"); return 0; } DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); dev_priv->hws_map.offset = dev->agp->base + hws->addr; dev_priv->hws_map.size = 4*1024; dev_priv->hws_map.type = 0; dev_priv->hws_map.flags = 0; dev_priv->hws_map.mtrr = 0; drm_core_ioremap_wc(&dev_priv->hws_map, dev); if (dev_priv->hws_map.handle == NULL) { i915_dma_cleanup(dev); ring->status_page.gfx_addr = 0; DRM_ERROR("can not ioremap virtual address for" " G33 hw status page\n"); return -ENOMEM; } ring->status_page.page_addr = (void __force __iomem *)dev_priv->hws_map.handle; memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", ring->status_page.gfx_addr); DRM_DEBUG_DRIVER("load hws at %p\n", ring->status_page.page_addr); return 0; } static int i915_get_bridge_dev(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); if (!dev_priv->bridge_dev) { DRM_ERROR("bridge device not found\n"); return -1; } return 0; } #define MCHBAR_I915 0x44 #define MCHBAR_I965 0x48 #define MCHBAR_SIZE (4*4096) #define DEVEN_REG 0x54 #define DEVEN_MCHBAR_EN (1 << 28) /* Allocate space for the MCH regs if needed, return nonzero on error */ static int intel_alloc_mchbar_resource(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; int reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; u32 temp_lo, temp_hi = 0; u64 mchbar_addr; int ret; if (INTEL_INFO(dev)->gen >= 4) pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); mchbar_addr = ((u64)temp_hi << 32) | temp_lo; /* If ACPI doesn't have it, assume we need to allocate it ourselves */ #ifdef CONFIG_PNP if (mchbar_addr && pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) return 0; #endif /* Get some space for it */ dev_priv->mch_res.name = "i915 MCHBAR"; dev_priv->mch_res.flags = IORESOURCE_MEM; ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, MCHBAR_SIZE, MCHBAR_SIZE, PCIBIOS_MIN_MEM, 0, pcibios_align_resource, dev_priv->bridge_dev); if (ret) { DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); dev_priv->mch_res.start = 0; return ret; } if (INTEL_INFO(dev)->gen >= 4) pci_write_config_dword(dev_priv->bridge_dev, reg + 4, upper_32_bits(dev_priv->mch_res.start)); pci_write_config_dword(dev_priv->bridge_dev, reg, lower_32_bits(dev_priv->mch_res.start)); return 0; } /* Setup MCHBAR if possible, return true if we should disable it again */ static void intel_setup_mchbar(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp; bool enabled; dev_priv->mchbar_need_disable = false; if (IS_I915G(dev) || IS_I915GM(dev)) { pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); enabled = !!(temp & DEVEN_MCHBAR_EN); } else { pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); enabled = temp & 1; } /* If it's already enabled, don't have to do anything */ if (enabled) return; if (intel_alloc_mchbar_resource(dev)) return; dev_priv->mchbar_need_disable = true; /* Space is allocated or reserved, so enable it. 
*/ if (IS_I915G(dev) || IS_I915GM(dev)) { pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp | DEVEN_MCHBAR_EN); } else { pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); } } static void intel_teardown_mchbar(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; u32 temp; if (dev_priv->mchbar_need_disable) { if (IS_I915G(dev) || IS_I915GM(dev)) { pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); temp &= ~DEVEN_MCHBAR_EN; pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); } else { pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); temp &= ~1; pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); } } if (dev_priv->mch_res.start) release_resource(&dev_priv->mch_res); } #define PTE_ADDRESS_MASK 0xfffff000 #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) #define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ #define PTE_MAPPING_TYPE_CACHED (3 << 1) #define PTE_MAPPING_TYPE_MASK (3 << 1) #define PTE_VALID (1 << 0) /** * i915_stolen_to_phys - take an offset into stolen memory and turn it into * a physical one * @dev: drm device * @offset: address to translate * * Some chip functions require allocations from stolen space and need the * physical address of the memory in question. */ static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev_priv->bridge_dev; u32 base; #if 0 /* On the machines I have tested the Graphics Base of Stolen Memory * is unreliable, so compute the base by subtracting the stolen memory * from the Top of Low Usable DRAM which is where the BIOS places * the graphics stolen memory. 
*/ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { /* top 32bits are reserved = 0 */ pci_read_config_dword(pdev, 0xA4, &base); } else { /* XXX presume 8xx is the same as i915 */ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base); } #else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { u16 val; pci_read_config_word(pdev, 0xb0, &val); base = val >> 4 << 20; } else { u8 val; pci_read_config_byte(pdev, 0x9c, &val); base = val >> 3 << 27; } base -= dev_priv->mm.gtt->stolen_size; #endif return base + offset; } static void i915_warn_stolen(struct drm_device *dev) { DRM_ERROR("not enough stolen space for compressed buffer, disabling\n"); DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); } static void i915_setup_compression(struct drm_device *dev, int size) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); unsigned long cfb_base; unsigned long ll_base = 0; compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); if (compressed_fb) compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); if (!compressed_fb) goto err; cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); if (!cfb_base) goto err_fb; if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, 4096, 4096, 0); if (compressed_llb) compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); if (!compressed_llb) goto err_fb; ll_base = i915_stolen_to_phys(dev, compressed_llb->start); if (!ll_base) goto err_llb; } dev_priv->cfb_size = size; intel_disable_fbc(dev); dev_priv->compressed_fb = compressed_fb; if (HAS_PCH_SPLIT(dev)) I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, compressed_fb->start); } else { I915_WRITE(FBC_CFB_BASE, cfb_base); I915_WRITE(FBC_LL_BASE, ll_base); dev_priv->compressed_llb = compressed_llb; } DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size 
%dM\n", cfb_base, ll_base, size >> 20); return; err_llb: drm_mm_put_block(compressed_llb); err_fb: drm_mm_put_block(compressed_fb); err: dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; i915_warn_stolen(dev); } static void i915_cleanup_compression(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; drm_mm_put_block(dev_priv->compressed_fb); if (dev_priv->compressed_llb) drm_mm_put_block(dev_priv->compressed_llb); } /* true = enable decode, false = disable decoder */ static unsigned int i915_vga_set_decode(void *cookie, bool state) { struct drm_device *dev = cookie; intel_modeset_vga_set_state(dev, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; else return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { printk(KERN_INFO "i915: switched on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; /* i915 resume handler doesn't set to D0 */ pci_set_power_state(dev->pdev, PCI_D0); i915_resume(dev); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { printk(KERN_ERR "i915: switched off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; i915_suspend(dev, pmm); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } static bool i915_switcheroo_can_switch(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); bool can_switch; spin_lock(&dev->count_lock); can_switch = (dev->open_count == 0); spin_unlock(&dev->count_lock); return can_switch; } static int i915_load_gem_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long prealloc_size, gtt_size, mappable_size; int ret; prealloc_size = dev_priv->mm.gtt->stolen_size; gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; mappable_size = 
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; /* Basic memrange allocator for stolen space */ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); /* Let GEM Manage all of the aperture. * * However, leave one page at the end still bound to the scratch page. * There are a number of places where the hardware apparently * prefetches past the end of the object, and we've seen multiple * hangs with the GPU head pointer stuck in a batchbuffer bound * at the last page of the aperture. One page should be enough to * keep any prefetching inside of the aperture. */ i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); mutex_lock(&dev->struct_mutex); ret = i915_gem_init_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); if (ret) return ret; /* Try to set up FBC with a reasonable compressed buffer size */ if (I915_HAS_FBC(dev) && i915_powersave) { int cfb_size; /* Leave 1M for line length buffer & misc. */ /* Try to get a 32M buffer... */ if (prealloc_size > (36*1024*1024)) cfb_size = 32*1024*1024; else /* fall back to 7/8 of the stolen space */ cfb_size = prealloc_size * 7 / 8; i915_setup_compression(dev, cfb_size); } /* Allow hardware batchbuffers unless told otherwise. */ dev_priv->allow_batchbuffer = 1; return 0; } static int i915_load_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; ret = intel_parse_bios(dev); if (ret) DRM_INFO("failed to find VBIOS tables\n"); /* If we have > 1 VGA cards, then we need to arbitrate access * to the common VGA resources. * * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), * then we do not take part in VGA arbitration and the * vga_client_register() fails with -ENODEV. 
*/ ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); if (ret && ret != -ENODEV) goto out; intel_register_dsm_handler(); ret = vga_switcheroo_register_client(dev->pdev, i915_switcheroo_set_state, NULL, i915_switcheroo_can_switch); if (ret) goto cleanup_vga_client; /* IIR "flip pending" bit means done if this bit is set */ if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE)) dev_priv->flip_pending_is_done = true; intel_modeset_init(dev); ret = i915_load_gem_init(dev); if (ret) goto cleanup_vga_switcheroo; intel_modeset_gem_init(dev); ret = drm_irq_install(dev); if (ret) goto cleanup_gem; /* Always safe in the mode setting case. */ /* FIXME: do pre/post-mode set stuff in core KMS code */ dev->vblank_disable_allowed = 1; ret = intel_fbdev_init(dev); if (ret) goto cleanup_irq; drm_kms_helper_poll_init(dev); /* We're off and running w/KMS */ dev_priv->mm.suspended = 0; return 0; cleanup_irq: drm_irq_uninstall(dev); cleanup_gem: mutex_lock(&dev->struct_mutex); i915_gem_cleanup_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); cleanup_vga_switcheroo: vga_switcheroo_unregister_client(dev->pdev); cleanup_vga_client: vga_client_register(dev->pdev, NULL, NULL, NULL); out: return ret; } int i915_master_create(struct drm_device *dev, struct drm_master *master) { struct drm_i915_master_private *master_priv; master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); if (!master_priv) return -ENOMEM; master->driver_priv = master_priv; return 0; } void i915_master_destroy(struct drm_device *dev, struct drm_master *master) { struct drm_i915_master_private *master_priv = master->driver_priv; if (!master_priv) return; kfree(master_priv); master->driver_priv = NULL; } static void i915_pineview_get_mem_freq(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 tmp; tmp = I915_READ(CLKCFG); switch (tmp & CLKCFG_FSB_MASK) { case CLKCFG_FSB_533: dev_priv->fsb_freq = 533; /* 133*4 */ break; case CLKCFG_FSB_800: dev_priv->fsb_freq = 800; /* 
200*4 */ break; case CLKCFG_FSB_667: dev_priv->fsb_freq = 667; /* 167*4 */ break; case CLKCFG_FSB_400: dev_priv->fsb_freq = 400; /* 100*4 */ break; } switch (tmp & CLKCFG_MEM_MASK) { case CLKCFG_MEM_533: dev_priv->mem_freq = 533; break; case CLKCFG_MEM_667: dev_priv->mem_freq = 667; break; case CLKCFG_MEM_800: dev_priv->mem_freq = 800; break; } /* detect pineview DDR3 setting */ tmp = I915_READ(CSHRDDR3CTL); dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; } static void i915_ironlake_get_mem_freq(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u16 ddrpll, csipll; ddrpll = I915_READ16(DDRMPLL1); csipll = I915_READ16(CSIPLL0); switch (ddrpll & 0xff) { case 0xc: dev_priv->mem_freq = 800; break; case 0x10: dev_priv->mem_freq = 1066; break; case 0x14: dev_priv->mem_freq = 1333; break; case 0x18: dev_priv->mem_freq = 1600; break; default: DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", ddrpll & 0xff); dev_priv->mem_freq = 0; break; } dev_priv->r_t = dev_priv->mem_freq; switch (csipll & 0x3ff) { case 0x00c: dev_priv->fsb_freq = 3200; break; case 0x00e: dev_priv->fsb_freq = 3733; break; case 0x010: dev_priv->fsb_freq = 4266; break; case 0x012: dev_priv->fsb_freq = 4800; break; case 0x014: dev_priv->fsb_freq = 5333; break; case 0x016: dev_priv->fsb_freq = 5866; break; case 0x018: dev_priv->fsb_freq = 6400; break; default: DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", csipll & 0x3ff); dev_priv->fsb_freq = 0; break; } if (dev_priv->fsb_freq == 3200) { dev_priv->c_m = 0; } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { dev_priv->c_m = 1; } else { dev_priv->c_m = 2; } } static const struct cparams { u16 i; u16 t; u16 m; u16 c; } cparams[] = { { 1, 1333, 301, 28664 }, { 1, 1066, 294, 24460 }, { 1, 800, 294, 25192 }, { 0, 1333, 276, 27605 }, { 0, 1066, 276, 27605 }, { 0, 800, 231, 23784 }, }; unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) { u64 total_count, diff, ret; u32 count1, count2, count3, m 
= 0, c = 0; unsigned long now = jiffies_to_msecs(jiffies), diff1; int i; diff1 = now - dev_priv->last_time1; /* Prevent division-by-zero if we are asking too fast. * Also, we don't get interesting results if we are polling * faster than once in 10ms, so just return the saved value * in such cases. */ if (diff1 <= 10) return dev_priv->chipset_power; count1 = I915_READ(DMIEC); count2 = I915_READ(DDREC); count3 = I915_READ(CSIEC); total_count = count1 + count2 + count3; /* FIXME: handle per-counter overflow */ if (total_count < dev_priv->last_count1) { diff = ~0UL - dev_priv->last_count1; diff += total_count; } else { diff = total_count - dev_priv->last_count1; } for (i = 0; i < ARRAY_SIZE(cparams); i++) { if (cparams[i].i == dev_priv->c_m && cparams[i].t == dev_priv->r_t) { m = cparams[i].m; c = cparams[i].c; break; } } diff = div_u64(diff, diff1); ret = ((m * diff) + c); ret = div_u64(ret, 10); dev_priv->last_count1 = total_count; dev_priv->last_time1 = now; dev_priv->chipset_power = ret; return ret; } unsigned long i915_mch_val(struct drm_i915_private *dev_priv) { unsigned long m, x, b; u32 tsfs; tsfs = I915_READ(TSFS); m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); x = I915_READ8(TR1); b = tsfs & TSFS_INTR_MASK; return ((m * x) / 127) - b; } static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) { static const struct v_table { u16 vd; /* in .1 mil */ u16 vm; /* in .1 mil */ } v_table[] = { { 0, 0, }, { 375, 0, }, { 500, 0, }, { 625, 0, }, { 750, 0, }, { 875, 0, }, { 1000, 0, }, { 1125, 0, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4125, 3000, }, { 4250, 3125, }, { 4375, 3250, }, { 4500, 3375, }, { 
4625, 3500, }, { 4750, 3625, }, { 4875, 3750, }, { 5000, 3875, }, { 5125, 4000, }, { 5250, 4125, }, { 5375, 4250, }, { 5500, 4375, }, { 5625, 4500, }, { 5750, 4625, }, { 5875, 4750, }, { 6000, 4875, }, { 6125, 5000, }, { 6250, 5125, }, { 6375, 5250, }, { 6500, 5375, }, { 6625, 5500, }, { 6750, 5625, }, { 6875, 5750, }, { 7000, 5875, }, { 7125, 6000, }, { 7250, 6125, }, { 7375, 6250, }, { 7500, 6375, }, { 7625, 6500, }, { 7750, 6625, }, { 7875, 6750, }, { 8000, 6875, }, { 8125, 7000, }, { 8250, 7125, }, { 8375, 7250, }, { 8500, 7375, }, { 8625, 7500, }, { 8750, 7625, }, { 8875, 7750, }, { 9000, 7875, }, { 9125, 8000, }, { 9250, 8125, }, { 9375, 8250, }, { 9500, 8375, }, { 9625, 8500, }, { 9750, 8625, }, { 9875, 8750, }, { 10000, 8875, }, { 10125, 9000, }, { 10250, 9125, }, { 10375, 9250, }, { 10500, 9375, }, { 10625, 9500, }, { 10750, 9625, }, { 10875, 9750, }, { 11000, 9875, }, { 11125, 10000, }, { 11250, 10125, }, { 11375, 10250, }, { 11500, 10375, }, { 11625, 10500, }, { 11750, 10625, }, { 11875, 10750, }, { 12000, 10875, }, { 12125, 11000, }, { 12250, 11125, }, { 12375, 11250, }, { 12500, 11375, }, { 12625, 11500, }, { 12750, 11625, }, { 12875, 11750, }, { 13000, 11875, }, { 13125, 12000, }, { 13250, 12125, }, { 13375, 12250, }, { 13500, 12375, }, { 13625, 12500, }, { 13750, 12625, }, { 13875, 12750, }, { 14000, 12875, }, { 14125, 13000, }, { 14250, 13125, }, { 14375, 13250, }, { 14500, 13375, }, { 14625, 13500, }, { 14750, 13625, }, { 14875, 13750, }, { 15000, 13875, }, { 15125, 14000, }, { 15250, 14125, }, { 15375, 14250, }, { 15500, 14375, }, { 15625, 14500, }, { 15750, 14625, }, { 15875, 14750, }, { 16000, 14875, }, { 16125, 15000, }, }; if (dev_priv->info->is_mobile) return v_table[pxvid].vm; else return v_table[pxvid].vd; } void i915_update_gfx_val(struct drm_i915_private *dev_priv) { struct timespec now, diff1; u64 diff; unsigned long diffms; u32 count; getrawmonotonic(&now); diff1 = timespec_sub(now, dev_priv->last_time2); /* Don't divide by 0 */ diffms 
= diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; if (!diffms) return; count = I915_READ(GFXEC); if (count < dev_priv->last_count2) { diff = ~0UL - dev_priv->last_count2; diff += count; } else { diff = count - dev_priv->last_count2; } dev_priv->last_count2 = count; dev_priv->last_time2 = now; /* More magic constants... */ diff = diff * 1181; diff = div_u64(diff, diffms * 10); dev_priv->gfx_power = diff; } unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) { unsigned long t, corr, state1, corr2, state2; u32 pxvid, ext_v; pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); pxvid = (pxvid >> 24) & 0x7f; ext_v = pvid_to_extvid(dev_priv, pxvid); state1 = ext_v; t = i915_mch_val(dev_priv); /* Revel in the empirically derived constants */ /* Correction factor in 1/100000 units */ if (t > 80) corr = ((t * 2349) + 135940); else if (t >= 50) corr = ((t * 964) + 29317); else /* < 50 */ corr = ((t * 301) + 1004); corr = corr * ((150142 * state1) / 10000 - 78642); corr /= 100000; corr2 = (corr * dev_priv->corr); state2 = (corr2 * state1) / 10000; state2 /= 100; /* convert to mW */ i915_update_gfx_val(dev_priv); return dev_priv->gfx_power + state2; } /* Global for IPS driver to get at the current i915 device */ static struct drm_i915_private *i915_mch_dev; /* * Lock protecting IPS related data structures * - i915_mch_dev * - dev_priv->max_delay * - dev_priv->min_delay * - dev_priv->fmax * - dev_priv->gpu_busy */ static DEFINE_SPINLOCK(mchdev_lock); /** * i915_read_mch_val - return value for IPS use * * Calculate and return a value for the IPS driver to use when deciding whether * we have thermal and power headroom to increase CPU or GPU power budget. 
*/ unsigned long i915_read_mch_val(void) { struct drm_i915_private *dev_priv; unsigned long chipset_val, graphics_val, ret = 0; spin_lock(&mchdev_lock); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; chipset_val = i915_chipset_val(dev_priv); graphics_val = i915_gfx_val(dev_priv); ret = chipset_val + graphics_val; out_unlock: spin_unlock(&mchdev_lock); return ret; } EXPORT_SYMBOL_GPL(i915_read_mch_val); /** * i915_gpu_raise - raise GPU frequency limit * * Raise the limit; IPS indicates we have thermal headroom. */ bool i915_gpu_raise(void) { struct drm_i915_private *dev_priv; bool ret = true; spin_lock(&mchdev_lock); if (!i915_mch_dev) { ret = false; goto out_unlock; } dev_priv = i915_mch_dev; if (dev_priv->max_delay > dev_priv->fmax) dev_priv->max_delay--; out_unlock: spin_unlock(&mchdev_lock); return ret; } EXPORT_SYMBOL_GPL(i915_gpu_raise); /** * i915_gpu_lower - lower GPU frequency limit * * IPS indicates we're close to a thermal limit, so throttle back the GPU * frequency maximum. */ bool i915_gpu_lower(void) { struct drm_i915_private *dev_priv; bool ret = true; spin_lock(&mchdev_lock); if (!i915_mch_dev) { ret = false; goto out_unlock; } dev_priv = i915_mch_dev; if (dev_priv->max_delay < dev_priv->min_delay) dev_priv->max_delay++; out_unlock: spin_unlock(&mchdev_lock); return ret; } EXPORT_SYMBOL_GPL(i915_gpu_lower); /** * i915_gpu_busy - indicate GPU business to IPS * * Tell the IPS driver whether or not the GPU is busy. */ bool i915_gpu_busy(void) { struct drm_i915_private *dev_priv; bool ret = false; spin_lock(&mchdev_lock); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; ret = dev_priv->busy; out_unlock: spin_unlock(&mchdev_lock); return ret; } EXPORT_SYMBOL_GPL(i915_gpu_busy); /** * i915_gpu_turbo_disable - disable graphics turbo * * Disable graphics turbo by resetting the max frequency and setting the * current frequency to the default. 
*/ bool i915_gpu_turbo_disable(void) { struct drm_i915_private *dev_priv; bool ret = true; spin_lock(&mchdev_lock); if (!i915_mch_dev) { ret = false; goto out_unlock; } dev_priv = i915_mch_dev; dev_priv->max_delay = dev_priv->fstart; if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) ret = false; out_unlock: spin_unlock(&mchdev_lock); return ret; } EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); /** * Tells the intel_ips driver that the i915 driver is now loaded, if * IPS got loaded first. * * This awkward dance is so that neither module has to depend on the * other in order for IPS to do the appropriate communication of * GPU turbo limits to i915. */ static void ips_ping_for_i915_load(void) { void (*link)(void); link = symbol_get(ips_link_to_i915_driver); if (link) { link(); symbol_put(ips_link_to_i915_driver); } } /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device * @flags: startup flags * * The driver load routine has to do several things: * - drive output discovery via intel_modeset_init() * - initialize the memory manager * - allocate initial config memory * - setup the DRM framebuffer with the allocated memory */ int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; int ret = 0, mmio_bar; uint32_t agp_size; /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; dev->types[7] = _DRM_STAT_PRIMARY; dev->types[8] = _DRM_STAT_SECONDARY; dev->types[9] = _DRM_STAT_DMA; dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->dev = dev; dev_priv->info = (struct intel_device_info *) flags; if (i915_get_bridge_dev(dev)) { ret = -EIO; goto free_priv; } /* overlay on gen2 is broken and can't address above 1G */ if (IS_GEN2(dev)) dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); /* 965GM sometimes incorrectly writes to hardware status page (HWS) * using 32bit 
addressing, overwriting memory if HWS is located * above 4GB. * * The documentation also mentions an issue with undefined * behaviour if any general state is accessed within a page above 4GB, * which also needs to be handled carefully. */ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); mmio_bar = IS_GEN2(dev) ? 1 : 0; dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); if (!dev_priv->regs) { DRM_ERROR("failed to map registers\n"); ret = -EIO; goto put_bridge; } dev_priv->mm.gtt = intel_gtt_get(); if (!dev_priv->mm.gtt) { DRM_ERROR("Failed to initialize GTT\n"); ret = -ENODEV; goto out_rmmap; } agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base, agp_size); if (dev_priv->mm.gtt_mapping == NULL) { ret = -EIO; goto out_rmmap; } /* Set up a WC MTRR for non-PAT systems. This is more common than * one would think, because the kernel disables PAT on first * generation Core chips because WC PAT gets overridden by a UC * MTRR if present. Even if a UC MTRR isn't present. */ dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, agp_size, MTRR_TYPE_WRCOMB, 1); if (dev_priv->mm.gtt_mtrr < 0) { DRM_INFO("MTRR allocation failed. Graphics " "performance may suffer.\n"); } /* The i915 workqueue is primarily used for batched retirement of * requests (and thus managing bo) once the task has been completed * by the GPU. i915_gem_retire_requests() is called directly when we * need high-priority retirement, such as waiting for an explicit * bo. * * It is also used for periodic low-priority events, such as * idle-timers and recording error state. * * All tasks on the workqueue are expected to acquire the dev mutex * so there is no point in running more than one instance of the * workqueue at any time: max_active = 1 and NON_REENTRANT. 
*/ dev_priv->wq = alloc_workqueue("i915", WQ_UNBOUND | WQ_NON_REENTRANT, 1); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); ret = -ENOMEM; goto out_mtrrfree; } /* enable GEM by default */ dev_priv->has_gem = 1; intel_irq_init(dev); /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev); intel_setup_gmbus(dev); intel_opregion_setup(dev); /* Make sure the bios did its job and set up vital registers */ intel_setup_bios(dev); i915_gem_load(dev); /* Init HWS */ if (!I915_NEED_GFX_HWS(dev)) { ret = i915_init_phys_hws(dev); if (ret) goto out_gem_unload; } if (IS_PINEVIEW(dev)) i915_pineview_get_mem_freq(dev); else if (IS_GEN5(dev)) i915_ironlake_get_mem_freq(dev); /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there * according to the published specs. It doesn't appear to function * correctly in testing on 945G. * This may be a side effect of MSI having been made available for PEG * and the registers being closely associated. * * According to chipset errata, on the 965GM, MSI interrupts may * be lost or delayed, but we use them anyways to avoid * stuck interrupts on some machines. 
*/ if (!IS_I945G(dev) && !IS_I945GM(dev)) pci_enable_msi(dev->pdev); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->error_lock); spin_lock_init(&dev_priv->rps_lock); if (IS_MOBILE(dev) || !IS_GEN2(dev)) dev_priv->num_pipe = 2; else dev_priv->num_pipe = 1; ret = drm_vblank_init(dev, dev_priv->num_pipe); if (ret) goto out_gem_unload; /* Start out suspended */ dev_priv->mm.suspended = 1; intel_detect_pch(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { ret = i915_load_modeset_init(dev); if (ret < 0) { DRM_ERROR("failed to init modeset\n"); goto out_gem_unload; } } /* Must be done after probing outputs */ intel_opregion_init(dev); acpi_video_register(); setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, (unsigned long) dev); spin_lock(&mchdev_lock); i915_mch_dev = dev_priv; dev_priv->mchdev_lock = &mchdev_lock; spin_unlock(&mchdev_lock); ips_ping_for_i915_load(); return 0; out_gem_unload: if (dev_priv->mm.inactive_shrinker.shrink) unregister_shrinker(&dev_priv->mm.inactive_shrinker); if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); destroy_workqueue(dev_priv->wq); out_mtrrfree: if (dev_priv->mm.gtt_mtrr >= 0) { mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, dev->agp->agp_info.aper_size * 1024 * 1024); dev_priv->mm.gtt_mtrr = -1; } io_mapping_free(dev_priv->mm.gtt_mapping); out_rmmap: pci_iounmap(dev->pdev, dev_priv->regs); put_bridge: pci_dev_put(dev_priv->bridge_dev); free_priv: kfree(dev_priv); return ret; } int i915_driver_unload(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; spin_lock(&mchdev_lock); i915_mch_dev = NULL; spin_unlock(&mchdev_lock); if (dev_priv->mm.inactive_shrinker.shrink) unregister_shrinker(&dev_priv->mm.inactive_shrinker); mutex_lock(&dev->struct_mutex); ret = i915_gpu_idle(dev); if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); mutex_unlock(&dev->struct_mutex); /* Cancel the retire work handler, 
which should be idle now. */ cancel_delayed_work_sync(&dev_priv->mm.retire_work); io_mapping_free(dev_priv->mm.gtt_mapping); if (dev_priv->mm.gtt_mtrr >= 0) { mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, dev->agp->agp_info.aper_size * 1024 * 1024); dev_priv->mm.gtt_mtrr = -1; } acpi_video_unregister(); if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_fbdev_fini(dev); intel_modeset_cleanup(dev); /* * free the memory space allocated for the child device * config parsed from VBT */ if (dev_priv->child_dev && dev_priv->child_dev_num) { kfree(dev_priv->child_dev); dev_priv->child_dev = NULL; dev_priv->child_dev_num = 0; } vga_switcheroo_unregister_client(dev->pdev); vga_client_register(dev->pdev, NULL, NULL, NULL); } /* Free error state after interrupts are fully disabled. */ del_timer_sync(&dev_priv->hangcheck_timer); cancel_work_sync(&dev_priv->error_work); i915_destroy_error_state(dev); if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); intel_opregion_fini(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { /* Flush any outstanding unpin_work. 
*/ flush_workqueue(dev_priv->wq); mutex_lock(&dev->struct_mutex); i915_gem_free_all_phys_object(dev); i915_gem_cleanup_ringbuffer(dev); mutex_unlock(&dev->struct_mutex); if (I915_HAS_FBC(dev) && i915_powersave) i915_cleanup_compression(dev); drm_mm_takedown(&dev_priv->mm.stolen); intel_cleanup_overlay(dev); if (!I915_NEED_GFX_HWS(dev)) i915_free_hws(dev); } if (dev_priv->regs != NULL) pci_iounmap(dev->pdev, dev_priv->regs); intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); destroy_workqueue(dev_priv->wq); pci_dev_put(dev_priv->bridge_dev); kfree(dev->dev_private); return 0; } int i915_driver_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv; DRM_DEBUG_DRIVER("\n"); file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); if (!file_priv) return -ENOMEM; file->driver_priv = file_priv; spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); return 0; } /** * i915_driver_lastclose - clean up after all DRM clients have exited * @dev: DRM device * * Take care of cleaning up after all DRM clients have exited. In the * mode setting case, we want to restore the kernel's initial mode (just * in case the last client left us in a bad state). * * Additionally, in the non-mode setting case, we'll tear down the AGP * and DMA structures, since the kernel won't be using them, and clea * up any GEM state. 
*/ void i915_driver_lastclose(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { intel_fb_restore_mode(dev); vga_switcheroo_process_delayed_switch(); return; } i915_gem_lastclose(dev); if (dev_priv->agp_heap) i915_mem_takedown(&(dev_priv->agp_heap)); i915_dma_cleanup(dev); } void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; i915_gem_release(dev, file_priv); if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_mem_release(dev, file_priv, dev_priv->agp_heap); } void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; kfree(file_priv); } struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, 
i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, 
DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); /** * Determine if the device really is AGP or not. * * All Intel graphics chipsets are treated as AGP, even if they are really * PCI-e. * * \param dev The device to be tested. * * \returns * A value of 1 is always retured to indictate every i9x5 is AGP. */ int i915_driver_device_is_agp(struct drm_device * dev) { return 1; }
gpl-2.0
pbeeler/htc-kernel-msm7x30
drivers/video/intelfb/intelfb_i2c.c
1544
5901
/************************************************************************** Copyright 2006 Dave Airlie <airlied@linux.ie> All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation on the rights to use, copy, modify, merge, publish, distribute, sub license, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/fb.h> #include <linux/i2c.h> #include <linux/i2c-id.h> #include <linux/i2c-algo-bit.h> #include <asm/io.h> #include "intelfb.h" #include "intelfbhw.h" /* bit locations in the registers */ #define SCL_DIR_MASK 0x0001 #define SCL_DIR 0x0002 #define SCL_VAL_MASK 0x0004 #define SCL_VAL_OUT 0x0008 #define SCL_VAL_IN 0x0010 #define SDA_DIR_MASK 0x0100 #define SDA_DIR 0x0200 #define SDA_VAL_MASK 0x0400 #define SDA_VAL_OUT 0x0800 #define SDA_VAL_IN 0x1000 static void intelfb_gpio_setscl(void *data, int state) { struct intelfb_i2c_chan *chan = data; struct intelfb_info *dinfo = chan->dinfo; u32 val; OUTREG(chan->reg, (state ? SCL_VAL_OUT : 0) | SCL_DIR | SCL_DIR_MASK | SCL_VAL_MASK); val = INREG(chan->reg); } static void intelfb_gpio_setsda(void *data, int state) { struct intelfb_i2c_chan *chan = data; struct intelfb_info *dinfo = chan->dinfo; u32 val; OUTREG(chan->reg, (state ? 
SDA_VAL_OUT : 0) | SDA_DIR | SDA_DIR_MASK | SDA_VAL_MASK); val = INREG(chan->reg); } static int intelfb_gpio_getscl(void *data) { struct intelfb_i2c_chan *chan = data; struct intelfb_info *dinfo = chan->dinfo; u32 val; OUTREG(chan->reg, SCL_DIR_MASK); OUTREG(chan->reg, 0); val = INREG(chan->reg); return ((val & SCL_VAL_IN) != 0); } static int intelfb_gpio_getsda(void *data) { struct intelfb_i2c_chan *chan = data; struct intelfb_info *dinfo = chan->dinfo; u32 val; OUTREG(chan->reg, SDA_DIR_MASK); OUTREG(chan->reg, 0); val = INREG(chan->reg); return ((val & SDA_VAL_IN) != 0); } static int intelfb_setup_i2c_bus(struct intelfb_info *dinfo, struct intelfb_i2c_chan *chan, const u32 reg, const char *name, int class) { int rc; chan->dinfo = dinfo; chan->reg = reg; snprintf(chan->adapter.name, sizeof(chan->adapter.name), "intelfb %s", name); chan->adapter.class = class; chan->adapter.owner = THIS_MODULE; chan->adapter.algo_data = &chan->algo; chan->adapter.dev.parent = &chan->dinfo->pdev->dev; chan->algo.setsda = intelfb_gpio_setsda; chan->algo.setscl = intelfb_gpio_setscl; chan->algo.getsda = intelfb_gpio_getsda; chan->algo.getscl = intelfb_gpio_getscl; chan->algo.udelay = 40; chan->algo.timeout = 20; chan->algo.data = chan; i2c_set_adapdata(&chan->adapter, chan); /* Raise SCL and SDA */ intelfb_gpio_setsda(chan, 1); intelfb_gpio_setscl(chan, 1); udelay(20); rc = i2c_bit_add_bus(&chan->adapter); if (rc == 0) DBG_MSG("I2C bus %s registered.\n", name); else WRN_MSG("Failed to register I2C bus %s.\n", name); return rc; } void intelfb_create_i2c_busses(struct intelfb_info *dinfo) { int i = 0; /* everyone has at least a single analog output */ dinfo->num_outputs = 1; dinfo->output[i].type = INTELFB_OUTPUT_ANALOG; /* setup the DDC bus for analog output */ intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].ddc_bus, GPIOA, "CRTDDC_A", I2C_CLASS_DDC); i++; /* need to add the output busses for each device - this function is very incomplete - i915GM has LVDS and TVOUT for example */ 
switch(dinfo->chipset) { case INTEL_830M: case INTEL_845G: case INTEL_854: case INTEL_855GM: case INTEL_865G: dinfo->output[i].type = INTELFB_OUTPUT_DVO; intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].ddc_bus, GPIOD, "DVODDC_D", I2C_CLASS_DDC); intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].i2c_bus, GPIOE, "DVOI2C_E", 0); i++; break; case INTEL_915G: case INTEL_915GM: /* has some LVDS + tv-out */ case INTEL_945G: case INTEL_945GM: case INTEL_945GME: case INTEL_965G: case INTEL_965GM: /* SDVO ports have a single control bus - 2 devices */ dinfo->output[i].type = INTELFB_OUTPUT_SDVO; intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].i2c_bus, GPIOE, "SDVOCTRL_E", 0); /* TODO: initialize the SDVO */ /* I830SDVOInit(pScrn, i, DVOB); */ i++; /* set up SDVOC */ dinfo->output[i].type = INTELFB_OUTPUT_SDVO; dinfo->output[i].i2c_bus = dinfo->output[i - 1].i2c_bus; /* TODO: initialize the SDVO */ /* I830SDVOInit(pScrn, i, DVOC); */ i++; break; } dinfo->num_outputs = i; } void intelfb_delete_i2c_busses(struct intelfb_info *dinfo) { int i; for (i = 0; i < MAX_OUTPUTS; i++) { if (dinfo->output[i].i2c_bus.dinfo) { i2c_del_adapter(&dinfo->output[i].i2c_bus.adapter); dinfo->output[i].i2c_bus.dinfo = NULL; } if (dinfo->output[i].ddc_bus.dinfo) { i2c_del_adapter(&dinfo->output[i].ddc_bus.adapter); dinfo->output[i].ddc_bus.dinfo = NULL; } } }
gpl-2.0
hephaex/a10c
drivers/staging/media/go7007/go7007-driver.c
2312
18308
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/unistd.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/firmware.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/tuner.h> #include <media/v4l2-common.h> #include "go7007-priv.h" /* * Wait for an interrupt to be delivered from the GO7007SB and return * the associated value and data. * * Must be called with the hw_lock held. */ int go7007_read_interrupt(struct go7007 *go, u16 *value, u16 *data) { go->interrupt_available = 0; go->hpi_ops->read_interrupt(go); if (wait_event_timeout(go->interrupt_waitq, go->interrupt_available, 5*HZ) < 0) { v4l2_err(&go->v4l2_dev, "timeout waiting for read interrupt\n"); return -1; } if (!go->interrupt_available) return -1; go->interrupt_available = 0; *value = go->interrupt_value & 0xfffe; *data = go->interrupt_data; return 0; } EXPORT_SYMBOL(go7007_read_interrupt); /* * Read a register/address on the GO7007SB. * * Must be called with the hw_lock held. 
*/ int go7007_read_addr(struct go7007 *go, u16 addr, u16 *data) { int count = 100; u16 value; if (go7007_write_interrupt(go, 0x0010, addr) < 0) return -EIO; while (count-- > 0) { if (go7007_read_interrupt(go, &value, data) == 0 && value == 0xa000) return 0; } return -EIO; } EXPORT_SYMBOL(go7007_read_addr); /* * Send the boot firmware to the encoder, which just wakes it up and lets * us talk to the GPIO pins and on-board I2C adapter. * * Must be called with the hw_lock held. */ static int go7007_load_encoder(struct go7007 *go) { const struct firmware *fw_entry; char fw_name[] = "go7007/go7007fw.bin"; void *bounce; int fw_len, rv = 0; u16 intr_val, intr_data; if (go->boot_fw == NULL) { if (request_firmware(&fw_entry, fw_name, go->dev)) { v4l2_err(go, "unable to load firmware from file \"%s\"\n", fw_name); return -1; } if (fw_entry->size < 16 || memcmp(fw_entry->data, "WISGO7007FW", 11)) { v4l2_err(go, "file \"%s\" does not appear to be go7007 firmware\n", fw_name); release_firmware(fw_entry); return -1; } fw_len = fw_entry->size - 16; bounce = kmemdup(fw_entry->data + 16, fw_len, GFP_KERNEL); if (bounce == NULL) { v4l2_err(go, "unable to allocate %d bytes for firmware transfer\n", fw_len); release_firmware(fw_entry); return -1; } release_firmware(fw_entry); go->boot_fw_len = fw_len; go->boot_fw = bounce; } if (go7007_interface_reset(go) < 0 || go7007_send_firmware(go, go->boot_fw, go->boot_fw_len) < 0 || go7007_read_interrupt(go, &intr_val, &intr_data) < 0 || (intr_val & ~0x1) != 0x5a5a) { v4l2_err(go, "error transferring firmware\n"); rv = -1; } return rv; } MODULE_FIRMWARE("go7007/go7007fw.bin"); /* * Boot the encoder and register the I2C adapter if requested. Do the * minimum initialization necessary, since the board-specific code may * still need to probe the board ID. * * Must NOT be called with the hw_lock held. 
*/ int go7007_boot_encoder(struct go7007 *go, int init_i2c) { int ret; mutex_lock(&go->hw_lock); ret = go7007_load_encoder(go); mutex_unlock(&go->hw_lock); if (ret < 0) return -1; if (!init_i2c) return 0; if (go7007_i2c_init(go) < 0) return -1; go->i2c_adapter_online = 1; return 0; } EXPORT_SYMBOL(go7007_boot_encoder); /* * Configure any hardware-related registers in the GO7007, such as GPIO * pins and bus parameters, which are board-specific. This assumes * the boot firmware has already been downloaded. * * Must be called with the hw_lock held. */ static int go7007_init_encoder(struct go7007 *go) { if (go->board_info->audio_flags & GO7007_AUDIO_I2S_MASTER) { go7007_write_addr(go, 0x1000, 0x0811); go7007_write_addr(go, 0x1000, 0x0c11); } switch (go->board_id) { case GO7007_BOARDID_MATRIX_REV: /* Set GPIO pin 0 to be an output (audio clock control) */ go7007_write_addr(go, 0x3c82, 0x0001); go7007_write_addr(go, 0x3c80, 0x00fe); break; case GO7007_BOARDID_ADLINK_MPG24: /* set GPIO5 to be an output, currently low */ go7007_write_addr(go, 0x3c82, 0x0000); go7007_write_addr(go, 0x3c80, 0x00df); break; case GO7007_BOARDID_ADS_USBAV_709: /* GPIO pin 0: audio clock control */ /* pin 2: TW9906 reset */ /* pin 3: capture LED */ go7007_write_addr(go, 0x3c82, 0x000d); go7007_write_addr(go, 0x3c80, 0x00f2); break; } return 0; } /* * Send the boot firmware to the GO7007 and configure the registers. This * is the only way to stop the encoder once it has started streaming video. * * Must be called with the hw_lock held. */ int go7007_reset_encoder(struct go7007 *go) { if (go7007_load_encoder(go) < 0) return -1; return go7007_init_encoder(go); } /* * Attempt to instantiate an I2C client by ID, probably loading a module. 
*/ static int init_i2c_module(struct i2c_adapter *adapter, const struct go_i2c *const i2c) { struct go7007 *go = i2c_get_adapdata(adapter); struct v4l2_device *v4l2_dev = &go->v4l2_dev; struct v4l2_subdev *sd; struct i2c_board_info info; memset(&info, 0, sizeof(info)); strlcpy(info.type, i2c->type, sizeof(info.type)); info.addr = i2c->addr; info.flags = i2c->flags; sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, NULL); if (sd) { if (i2c->is_video) go->sd_video = sd; if (i2c->is_audio) go->sd_audio = sd; return 0; } printk(KERN_INFO "go7007: probing for module i2c:%s failed\n", i2c->type); return -EINVAL; } /* * Detach and unregister the encoder. The go7007 struct won't be freed * until v4l2 finishes releasing its resources and all associated fds are * closed by applications. */ static void go7007_remove(struct v4l2_device *v4l2_dev) { struct go7007 *go = container_of(v4l2_dev, struct go7007, v4l2_dev); v4l2_device_unregister(v4l2_dev); if (go->hpi_ops->release) go->hpi_ops->release(go); if (go->i2c_adapter_online) { i2c_del_adapter(&go->i2c_adapter); go->i2c_adapter_online = 0; } kfree(go->boot_fw); go7007_v4l2_remove(go); kfree(go); } /* * Finalize the GO7007 hardware setup, register the on-board I2C adapter * (if used on this board), load the I2C client driver for the sensor * (SAA7115 or whatever) and other devices, and register the ALSA and V4L2 * interfaces. * * Must NOT be called with the hw_lock held. 
*/ int go7007_register_encoder(struct go7007 *go, unsigned num_i2c_devs) { int i, ret; dev_info(go->dev, "go7007: registering new %s\n", go->name); go->v4l2_dev.release = go7007_remove; ret = v4l2_device_register(go->dev, &go->v4l2_dev); if (ret < 0) return ret; mutex_lock(&go->hw_lock); ret = go7007_init_encoder(go); mutex_unlock(&go->hw_lock); if (ret < 0) return ret; ret = go7007_v4l2_ctrl_init(go); if (ret < 0) return ret; if (!go->i2c_adapter_online && go->board_info->flags & GO7007_BOARD_USE_ONBOARD_I2C) { ret = go7007_i2c_init(go); if (ret < 0) return ret; go->i2c_adapter_online = 1; } if (go->i2c_adapter_online) { if (go->board_id == GO7007_BOARDID_ADS_USBAV_709) { /* Reset the TW9906 */ go7007_write_addr(go, 0x3c82, 0x0009); msleep(50); go7007_write_addr(go, 0x3c82, 0x000d); } for (i = 0; i < num_i2c_devs; ++i) init_i2c_module(&go->i2c_adapter, &go->board_info->i2c_devs[i]); if (go->tuner_type >= 0) { struct tuner_setup setup = { .addr = ADDR_UNSET, .type = go->tuner_type, .mode_mask = T_ANALOG_TV, }; v4l2_device_call_all(&go->v4l2_dev, 0, tuner, s_type_addr, &setup); } if (go->board_id == GO7007_BOARDID_ADLINK_MPG24) v4l2_subdev_call(go->sd_video, video, s_routing, 0, 0, go->channel_number + 1); } ret = go7007_v4l2_init(go); if (ret < 0) return ret; if (go->board_info->flags & GO7007_BOARD_HAS_AUDIO) { go->audio_enabled = 1; go7007_snd_init(go); } return 0; } EXPORT_SYMBOL(go7007_register_encoder); /* * Send the encode firmware to the encoder, which will cause it * to immediately start delivering the video and audio streams. * * Must be called with the hw_lock held. 
*/ int go7007_start_encoder(struct go7007 *go) { u8 *fw; int fw_len, rv = 0, i; u16 intr_val, intr_data; go->modet_enable = 0; if (!go->dvd_mode) for (i = 0; i < 4; ++i) { if (go->modet[i].enable) { go->modet_enable = 1; continue; } go->modet[i].pixel_threshold = 32767; go->modet[i].motion_threshold = 32767; go->modet[i].mb_threshold = 32767; } if (go7007_construct_fw_image(go, &fw, &fw_len) < 0) return -1; if (go7007_send_firmware(go, fw, fw_len) < 0 || go7007_read_interrupt(go, &intr_val, &intr_data) < 0) { v4l2_err(&go->v4l2_dev, "error transferring firmware\n"); rv = -1; goto start_error; } go->state = STATE_DATA; go->parse_length = 0; go->seen_frame = 0; if (go7007_stream_start(go) < 0) { v4l2_err(&go->v4l2_dev, "error starting stream transfer\n"); rv = -1; goto start_error; } start_error: kfree(fw); return rv; } /* * Store a byte in the current video buffer, if there is one. */ static inline void store_byte(struct go7007_buffer *vb, u8 byte) { if (vb && vb->vb.v4l2_planes[0].bytesused < GO7007_BUF_SIZE) { u8 *ptr = vb2_plane_vaddr(&vb->vb, 0); ptr[vb->vb.v4l2_planes[0].bytesused++] = byte; } } /* * Deliver the last video buffer and get a new one to start writing to. 
*/ static struct go7007_buffer *frame_boundary(struct go7007 *go, struct go7007_buffer *vb) { struct go7007_buffer *vb_tmp = NULL; u32 *bytesused = &vb->vb.v4l2_planes[0].bytesused; int i; if (vb) { if (vb->modet_active) { if (*bytesused + 216 < GO7007_BUF_SIZE) { for (i = 0; i < 216; ++i) store_byte(vb, go->active_map[i]); *bytesused -= 216; } else vb->modet_active = 0; } vb->vb.v4l2_buf.sequence = go->next_seq++; v4l2_get_timestamp(&vb->vb.v4l2_buf.timestamp); vb_tmp = vb; spin_lock(&go->spinlock); list_del(&vb->list); if (list_empty(&go->vidq_active)) vb = NULL; else vb = list_first_entry(&go->vidq_active, struct go7007_buffer, list); go->active_buf = vb; spin_unlock(&go->spinlock); vb2_buffer_done(&vb_tmp->vb, VB2_BUF_STATE_DONE); return vb; } spin_lock(&go->spinlock); if (!list_empty(&go->vidq_active)) vb = go->active_buf = list_first_entry(&go->vidq_active, struct go7007_buffer, list); spin_unlock(&go->spinlock); go->next_seq++; return vb; } static void write_bitmap_word(struct go7007 *go) { int x, y, i, stride = ((go->width >> 4) + 7) >> 3; for (i = 0; i < 16; ++i) { y = (((go->parse_length - 1) << 3) + i) / (go->width >> 4); x = (((go->parse_length - 1) << 3) + i) % (go->width >> 4); if (stride * y + (x >> 3) < sizeof(go->active_map)) go->active_map[stride * y + (x >> 3)] |= (go->modet_word & 1) << (x & 0x7); go->modet_word >>= 1; } } /* * Parse a chunk of the video stream into frames. The frames are not * delimited by the hardware, so we have to parse the frame boundaries * based on the type of video stream we're receiving. 
*/ void go7007_parse_video_stream(struct go7007 *go, u8 *buf, int length) { struct go7007_buffer *vb = go->active_buf; int i, seq_start_code = -1, gop_start_code = -1, frame_start_code = -1; switch (go->format) { case V4L2_PIX_FMT_MPEG4: seq_start_code = 0xB0; gop_start_code = 0xB3; frame_start_code = 0xB6; break; case V4L2_PIX_FMT_MPEG1: case V4L2_PIX_FMT_MPEG2: seq_start_code = 0xB3; gop_start_code = 0xB8; frame_start_code = 0x00; break; } for (i = 0; i < length; ++i) { if (vb && vb->vb.v4l2_planes[0].bytesused >= GO7007_BUF_SIZE - 3) { v4l2_info(&go->v4l2_dev, "dropping oversized frame\n"); vb->vb.v4l2_planes[0].bytesused = 0; vb->frame_offset = 0; vb->modet_active = 0; vb = go->active_buf = NULL; } switch (go->state) { case STATE_DATA: switch (buf[i]) { case 0x00: go->state = STATE_00; break; case 0xFF: go->state = STATE_FF; break; default: store_byte(vb, buf[i]); break; } break; case STATE_00: switch (buf[i]) { case 0x00: go->state = STATE_00_00; break; case 0xFF: store_byte(vb, 0x00); go->state = STATE_FF; break; default: store_byte(vb, 0x00); store_byte(vb, buf[i]); go->state = STATE_DATA; break; } break; case STATE_00_00: switch (buf[i]) { case 0x00: store_byte(vb, 0x00); /* go->state remains STATE_00_00 */ break; case 0x01: go->state = STATE_00_00_01; break; case 0xFF: store_byte(vb, 0x00); store_byte(vb, 0x00); go->state = STATE_FF; break; default: store_byte(vb, 0x00); store_byte(vb, 0x00); store_byte(vb, buf[i]); go->state = STATE_DATA; break; } break; case STATE_00_00_01: if (buf[i] == 0xF8 && go->modet_enable == 0) { /* MODET start code, but MODET not enabled */ store_byte(vb, 0x00); store_byte(vb, 0x00); store_byte(vb, 0x01); store_byte(vb, 0xF8); go->state = STATE_DATA; break; } /* If this is the start of a new MPEG frame, * get a new buffer */ if ((go->format == V4L2_PIX_FMT_MPEG1 || go->format == V4L2_PIX_FMT_MPEG2 || go->format == V4L2_PIX_FMT_MPEG4) && (buf[i] == seq_start_code || buf[i] == gop_start_code || buf[i] == frame_start_code)) { if (vb 
== NULL || go->seen_frame) vb = frame_boundary(go, vb); go->seen_frame = buf[i] == frame_start_code; if (vb && go->seen_frame) vb->frame_offset = vb->vb.v4l2_planes[0].bytesused; } /* Handle any special chunk types, or just write the * start code to the (potentially new) buffer */ switch (buf[i]) { case 0xF5: /* timestamp */ go->parse_length = 12; go->state = STATE_UNPARSED; break; case 0xF6: /* vbi */ go->state = STATE_VBI_LEN_A; break; case 0xF8: /* MD map */ go->parse_length = 0; memset(go->active_map, 0, sizeof(go->active_map)); go->state = STATE_MODET_MAP; break; case 0xFF: /* Potential JPEG start code */ store_byte(vb, 0x00); store_byte(vb, 0x00); store_byte(vb, 0x01); go->state = STATE_FF; break; default: store_byte(vb, 0x00); store_byte(vb, 0x00); store_byte(vb, 0x01); store_byte(vb, buf[i]); go->state = STATE_DATA; break; } break; case STATE_FF: switch (buf[i]) { case 0x00: store_byte(vb, 0xFF); go->state = STATE_00; break; case 0xFF: store_byte(vb, 0xFF); /* go->state remains STATE_FF */ break; case 0xD8: if (go->format == V4L2_PIX_FMT_MJPEG) vb = frame_boundary(go, vb); /* fall through */ default: store_byte(vb, 0xFF); store_byte(vb, buf[i]); go->state = STATE_DATA; break; } break; case STATE_VBI_LEN_A: go->parse_length = buf[i] << 8; go->state = STATE_VBI_LEN_B; break; case STATE_VBI_LEN_B: go->parse_length |= buf[i]; if (go->parse_length > 0) go->state = STATE_UNPARSED; else go->state = STATE_DATA; break; case STATE_MODET_MAP: if (go->parse_length < 204) { if (go->parse_length & 1) { go->modet_word |= buf[i]; write_bitmap_word(go); } else go->modet_word = buf[i] << 8; } else if (go->parse_length == 207 && vb) { vb->modet_active = buf[i]; } if (++go->parse_length == 208) go->state = STATE_DATA; break; case STATE_UNPARSED: if (--go->parse_length == 0) go->state = STATE_DATA; break; } } } EXPORT_SYMBOL(go7007_parse_video_stream); /* * Allocate a new go7007 struct. Used by the hardware-specific probe. 
*/ struct go7007 *go7007_alloc(const struct go7007_board_info *board, struct device *dev) { struct go7007 *go; int i; go = kzalloc(sizeof(struct go7007), GFP_KERNEL); if (go == NULL) return NULL; go->dev = dev; go->board_info = board; go->board_id = 0; go->tuner_type = -1; go->channel_number = 0; go->name[0] = 0; mutex_init(&go->hw_lock); init_waitqueue_head(&go->frame_waitq); spin_lock_init(&go->spinlock); go->status = STATUS_INIT; memset(&go->i2c_adapter, 0, sizeof(go->i2c_adapter)); go->i2c_adapter_online = 0; go->interrupt_available = 0; init_waitqueue_head(&go->interrupt_waitq); go->input = 0; go7007_update_board(go); go->encoder_h_halve = 0; go->encoder_v_halve = 0; go->encoder_subsample = 0; go->format = V4L2_PIX_FMT_MJPEG; go->bitrate = 1500000; go->fps_scale = 1; go->pali = 0; go->aspect_ratio = GO7007_RATIO_1_1; go->gop_size = 0; go->ipb = 0; go->closed_gop = 0; go->repeat_seqhead = 0; go->seq_header_enable = 0; go->gop_header_enable = 0; go->dvd_mode = 0; go->interlace_coding = 0; for (i = 0; i < 4; ++i) go->modet[i].enable = 0; for (i = 0; i < 1624; ++i) go->modet_map[i] = 0; go->audio_deliver = NULL; go->audio_enabled = 0; return go; } EXPORT_SYMBOL(go7007_alloc); void go7007_update_board(struct go7007 *go) { const struct go7007_board_info *board = go->board_info; if (board->sensor_flags & GO7007_SENSOR_TV) { go->standard = GO7007_STD_NTSC; go->std = V4L2_STD_NTSC_M; go->width = 720; go->height = 480; go->sensor_framerate = 30000; } else { go->standard = GO7007_STD_OTHER; go->width = board->sensor_width; go->height = board->sensor_height; go->sensor_framerate = board->sensor_framerate; } go->encoder_v_offset = board->sensor_v_offset; go->encoder_h_offset = board->sensor_h_offset; } EXPORT_SYMBOL(go7007_update_board); MODULE_LICENSE("GPL v2");
gpl-2.0
emceethemouth/kernel_mainline
drivers/uio/uio_netx.c
2312
4388
/* * UIO driver for Hilscher NetX based fieldbus cards (cifX, comX). * See http://www.hilscher.com for details. * * (C) 2007 Hans J. Koch <hjk@hansjkoch.de> * (C) 2008 Manuel Traut <manut@linutronix.de> * * Licensed under GPL version 2 only. * */ #include <linux/device.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/uio_driver.h> #define PCI_VENDOR_ID_HILSCHER 0x15CF #define PCI_DEVICE_ID_HILSCHER_NETX 0x0000 #define PCI_DEVICE_ID_HILSCHER_NETPLC 0x0010 #define PCI_SUBDEVICE_ID_NETPLC_RAM 0x0000 #define PCI_SUBDEVICE_ID_NETPLC_FLASH 0x0001 #define PCI_SUBDEVICE_ID_NXSB_PCA 0x3235 #define PCI_SUBDEVICE_ID_NXPCA 0x3335 #define DPM_HOST_INT_EN0 0xfff0 #define DPM_HOST_INT_STAT0 0xffe0 #define DPM_HOST_INT_MASK 0xe600ffff #define DPM_HOST_INT_GLOBAL_EN 0x80000000 static irqreturn_t netx_handler(int irq, struct uio_info *dev_info) { void __iomem *int_enable_reg = dev_info->mem[0].internal_addr + DPM_HOST_INT_EN0; void __iomem *int_status_reg = dev_info->mem[0].internal_addr + DPM_HOST_INT_STAT0; /* Is one of our interrupts enabled and active ? 
*/ if (!(ioread32(int_enable_reg) & ioread32(int_status_reg) & DPM_HOST_INT_MASK)) return IRQ_NONE; /* Disable interrupt */ iowrite32(ioread32(int_enable_reg) & ~DPM_HOST_INT_GLOBAL_EN, int_enable_reg); return IRQ_HANDLED; } static int netx_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct uio_info *info; int bar; info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; if (pci_enable_device(dev)) goto out_free; if (pci_request_regions(dev, "netx")) goto out_disable; switch (id->device) { case PCI_DEVICE_ID_HILSCHER_NETX: bar = 0; info->name = "netx"; break; case PCI_DEVICE_ID_HILSCHER_NETPLC: bar = 0; info->name = "netplc"; break; default: bar = 2; info->name = "netx_plx"; } /* BAR0 or 2 points to the card's dual port memory */ info->mem[0].addr = pci_resource_start(dev, bar); if (!info->mem[0].addr) goto out_release; info->mem[0].internal_addr = ioremap(pci_resource_start(dev, bar), pci_resource_len(dev, bar)); if (!info->mem[0].internal_addr) goto out_release; info->mem[0].size = pci_resource_len(dev, bar); info->mem[0].memtype = UIO_MEM_PHYS; info->irq = dev->irq; info->irq_flags = IRQF_SHARED; info->handler = netx_handler; info->version = "0.0.1"; /* Make sure all interrupts are disabled */ iowrite32(0, info->mem[0].internal_addr + DPM_HOST_INT_EN0); if (uio_register_device(&dev->dev, info)) goto out_unmap; pci_set_drvdata(dev, info); dev_info(&dev->dev, "Found %s card, registered UIO device.\n", info->name); return 0; out_unmap: iounmap(info->mem[0].internal_addr); out_release: pci_release_regions(dev); out_disable: pci_disable_device(dev); out_free: kfree(info); return -ENODEV; } static void netx_pci_remove(struct pci_dev *dev) { struct uio_info *info = pci_get_drvdata(dev); /* Disable all interrupts */ iowrite32(0, info->mem[0].internal_addr + DPM_HOST_INT_EN0); uio_unregister_device(info); pci_release_regions(dev); pci_disable_device(dev); iounmap(info->mem[0].internal_addr); kfree(info); } static struct 
pci_device_id netx_pci_ids[] = { { .vendor = PCI_VENDOR_ID_HILSCHER, .device = PCI_DEVICE_ID_HILSCHER_NETX, .subvendor = 0, .subdevice = 0, }, { .vendor = PCI_VENDOR_ID_HILSCHER, .device = PCI_DEVICE_ID_HILSCHER_NETPLC, .subvendor = PCI_VENDOR_ID_HILSCHER, .subdevice = PCI_SUBDEVICE_ID_NETPLC_RAM, }, { .vendor = PCI_VENDOR_ID_HILSCHER, .device = PCI_DEVICE_ID_HILSCHER_NETPLC, .subvendor = PCI_VENDOR_ID_HILSCHER, .subdevice = PCI_SUBDEVICE_ID_NETPLC_FLASH, }, { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9030, .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_SUBDEVICE_ID_NXSB_PCA, }, { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9030, .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_SUBDEVICE_ID_NXPCA, }, { 0, } }; static struct pci_driver netx_pci_driver = { .name = "netx", .id_table = netx_pci_ids, .probe = netx_pci_probe, .remove = netx_pci_remove, }; module_pci_driver(netx_pci_driver); MODULE_DEVICE_TABLE(pci, netx_pci_ids); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Hans J. Koch, Manuel Traut");
gpl-2.0
jawad6233/MT6795.kernel
alps/kernel-3.10/drivers/video/neofb.c
2312
56594
/* * linux/drivers/video/neofb.c -- NeoMagic Framebuffer Driver * * Copyright (c) 2001-2002 Denis Oliver Kropp <dok@directfb.org> * * * Card specific code is based on XFree86's neomagic driver. * Framebuffer framework code is based on code of cyber2000fb. * * This file is subject to the terms and conditions of the GNU General * Public License. See the file COPYING in the main directory of this * archive for more details. * * * 0.4.1 * - Cosmetic changes (dok) * * 0.4 * - Toshiba Libretto support, allow modes larger than LCD size if * LCD is disabled, keep BIOS settings if internal/external display * haven't been enabled explicitly * (Thomas J. Moore <dark@mama.indstate.edu>) * * 0.3.3 * - Porting over to new fbdev api. (jsimmons) * * 0.3.2 * - got rid of all floating point (dok) * * 0.3.1 * - added module license (dok) * * 0.3 * - hardware accelerated clear and move for 2200 and above (dok) * - maximum allowed dotclock is handled now (dok) * * 0.2.1 * - correct panning after X usage (dok) * - added module and kernel parameters (dok) * - no stretching if external display is enabled (dok) * * 0.2 * - initial version (dok) * * * TODO * - ioctl for internal/external switching * - blanking * - 32bit depth support, maybe impossible * - disable pan-on-sync, need specs * * BUGS * - white margin on bootup like with tdfxfb (colormap problem?) 
* */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/pci.h> #include <linux/init.h> #ifdef CONFIG_TOSHIBA #include <linux/toshiba.h> #endif #include <asm/io.h> #include <asm/irq.h> #include <asm/pgtable.h> #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif #include <video/vga.h> #include <video/neomagic.h> #define NEOFB_VERSION "0.4.2" /* --------------------------------------------------------------------- */ static bool internal; static bool external; static bool libretto; static bool nostretch; static bool nopciburst; static char *mode_option = NULL; #ifdef MODULE MODULE_AUTHOR("(c) 2001-2002 Denis Oliver Kropp <dok@convergence.de>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("FBDev driver for NeoMagic PCI Chips"); module_param(internal, bool, 0); MODULE_PARM_DESC(internal, "Enable output on internal LCD Display."); module_param(external, bool, 0); MODULE_PARM_DESC(external, "Enable output on external CRT."); module_param(libretto, bool, 0); MODULE_PARM_DESC(libretto, "Force Libretto 100/110 800x480 LCD."); module_param(nostretch, bool, 0); MODULE_PARM_DESC(nostretch, "Disable stretching of modes smaller than LCD."); module_param(nopciburst, bool, 0); MODULE_PARM_DESC(nopciburst, "Disable PCI burst mode."); module_param(mode_option, charp, 0); MODULE_PARM_DESC(mode_option, "Preferred video mode ('640x480-8@60', etc)"); #endif /* --------------------------------------------------------------------- */ static biosMode bios8[] = { {320, 240, 0x40}, {300, 400, 0x42}, {640, 400, 0x20}, {640, 480, 0x21}, {800, 600, 0x23}, {1024, 768, 0x25}, }; static biosMode bios16[] = { {320, 200, 0x2e}, {320, 240, 0x41}, {300, 400, 0x43}, {640, 480, 0x31}, {800, 600, 0x34}, {1024, 768, 0x37}, }; static biosMode bios24[] = { {640, 480, 0x32}, {800, 600, 0x35}, {1024, 768, 0x38} }; #ifdef NO_32BIT_SUPPORT_YET /* FIXME: 
guessed values, wrong */ static biosMode bios32[] = { {640, 480, 0x33}, {800, 600, 0x36}, {1024, 768, 0x39} }; #endif static inline void write_le32(int regindex, u32 val, const struct neofb_par *par) { writel(val, par->neo2200 + par->cursorOff + regindex); } static int neoFindMode(int xres, int yres, int depth) { int xres_s; int i, size; biosMode *mode; switch (depth) { case 8: size = ARRAY_SIZE(bios8); mode = bios8; break; case 16: size = ARRAY_SIZE(bios16); mode = bios16; break; case 24: size = ARRAY_SIZE(bios24); mode = bios24; break; #ifdef NO_32BIT_SUPPORT_YET case 32: size = ARRAY_SIZE(bios32); mode = bios32; break; #endif default: return 0; } for (i = 0; i < size; i++) { if (xres <= mode[i].x_res) { xres_s = mode[i].x_res; for (; i < size; i++) { if (mode[i].x_res != xres_s) return mode[i - 1].mode; if (yres <= mode[i].y_res) return mode[i].mode; } } } return mode[size - 1].mode; } /* * neoCalcVCLK -- * * Determine the closest clock frequency to the one requested. */ #define MAX_N 127 #define MAX_D 31 #define MAX_F 1 static void neoCalcVCLK(const struct fb_info *info, struct neofb_par *par, long freq) { int n, d, f; int n_best = 0, d_best = 0, f_best = 0; long f_best_diff = 0x7ffff; for (f = 0; f <= MAX_F; f++) for (d = 0; d <= MAX_D; d++) for (n = 0; n <= MAX_N; n++) { long f_out; long f_diff; f_out = ((14318 * (n + 1)) / (d + 1)) >> f; f_diff = abs(f_out - freq); if (f_diff <= f_best_diff) { f_best_diff = f_diff; n_best = n; d_best = d; f_best = f; } if (f_out > freq) break; } if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 || info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) { /* NOT_DONE: We are trying the full range of the 2200 clock. 
We should be able to try n up to 2047 */ par->VCLK3NumeratorLow = n_best; par->VCLK3NumeratorHigh = (f_best << 7); } else par->VCLK3NumeratorLow = n_best | (f_best << 7); par->VCLK3Denominator = d_best; #ifdef NEOFB_DEBUG printk(KERN_DEBUG "neoVCLK: f:%ld NumLow=%d NumHi=%d Den=%d Df=%ld\n", freq, par->VCLK3NumeratorLow, par->VCLK3NumeratorHigh, par->VCLK3Denominator, f_best_diff); #endif } /* * vgaHWInit -- * Handle the initialization, etc. of a screen. * Return FALSE on failure. */ static int vgaHWInit(const struct fb_var_screeninfo *var, struct neofb_par *par) { int hsync_end = var->xres + var->right_margin + var->hsync_len; int htotal = (hsync_end + var->left_margin) >> 3; int vsync_start = var->yres + var->lower_margin; int vsync_end = vsync_start + var->vsync_len; int vtotal = vsync_end + var->upper_margin; par->MiscOutReg = 0x23; if (!(var->sync & FB_SYNC_HOR_HIGH_ACT)) par->MiscOutReg |= 0x40; if (!(var->sync & FB_SYNC_VERT_HIGH_ACT)) par->MiscOutReg |= 0x80; /* * Time Sequencer */ par->Sequencer[0] = 0x00; par->Sequencer[1] = 0x01; par->Sequencer[2] = 0x0F; par->Sequencer[3] = 0x00; /* Font select */ par->Sequencer[4] = 0x0E; /* Misc */ /* * CRTC Controller */ par->CRTC[0] = htotal - 5; par->CRTC[1] = (var->xres >> 3) - 1; par->CRTC[2] = (var->xres >> 3) - 1; par->CRTC[3] = ((htotal - 1) & 0x1F) | 0x80; par->CRTC[4] = ((var->xres + var->right_margin) >> 3); par->CRTC[5] = (((htotal - 1) & 0x20) << 2) | (((hsync_end >> 3)) & 0x1F); par->CRTC[6] = (vtotal - 2) & 0xFF; par->CRTC[7] = (((vtotal - 2) & 0x100) >> 8) | (((var->yres - 1) & 0x100) >> 7) | ((vsync_start & 0x100) >> 6) | (((var->yres - 1) & 0x100) >> 5) | 0x10 | (((vtotal - 2) & 0x200) >> 4) | (((var->yres - 1) & 0x200) >> 3) | ((vsync_start & 0x200) >> 2); par->CRTC[8] = 0x00; par->CRTC[9] = (((var->yres - 1) & 0x200) >> 4) | 0x40; if (var->vmode & FB_VMODE_DOUBLE) par->CRTC[9] |= 0x80; par->CRTC[10] = 0x00; par->CRTC[11] = 0x00; par->CRTC[12] = 0x00; par->CRTC[13] = 0x00; par->CRTC[14] = 0x00; 
par->CRTC[15] = 0x00; par->CRTC[16] = vsync_start & 0xFF; par->CRTC[17] = (vsync_end & 0x0F) | 0x20; par->CRTC[18] = (var->yres - 1) & 0xFF; par->CRTC[19] = var->xres_virtual >> 4; par->CRTC[20] = 0x00; par->CRTC[21] = (var->yres - 1) & 0xFF; par->CRTC[22] = (vtotal - 1) & 0xFF; par->CRTC[23] = 0xC3; par->CRTC[24] = 0xFF; /* * are these unnecessary? * vgaHWHBlankKGA(mode, regp, 0, KGA_FIX_OVERSCAN | KGA_ENABLE_ON_ZERO); * vgaHWVBlankKGA(mode, regp, 0, KGA_FIX_OVERSCAN | KGA_ENABLE_ON_ZERO); */ /* * Graphics Display Controller */ par->Graphics[0] = 0x00; par->Graphics[1] = 0x00; par->Graphics[2] = 0x00; par->Graphics[3] = 0x00; par->Graphics[4] = 0x00; par->Graphics[5] = 0x40; par->Graphics[6] = 0x05; /* only map 64k VGA memory !!!! */ par->Graphics[7] = 0x0F; par->Graphics[8] = 0xFF; par->Attribute[0] = 0x00; /* standard colormap translation */ par->Attribute[1] = 0x01; par->Attribute[2] = 0x02; par->Attribute[3] = 0x03; par->Attribute[4] = 0x04; par->Attribute[5] = 0x05; par->Attribute[6] = 0x06; par->Attribute[7] = 0x07; par->Attribute[8] = 0x08; par->Attribute[9] = 0x09; par->Attribute[10] = 0x0A; par->Attribute[11] = 0x0B; par->Attribute[12] = 0x0C; par->Attribute[13] = 0x0D; par->Attribute[14] = 0x0E; par->Attribute[15] = 0x0F; par->Attribute[16] = 0x41; par->Attribute[17] = 0xFF; par->Attribute[18] = 0x0F; par->Attribute[19] = 0x00; par->Attribute[20] = 0x00; return 0; } static void vgaHWLock(struct vgastate *state) { /* Protect CRTC[0-7] */ vga_wcrt(state->vgabase, 0x11, vga_rcrt(state->vgabase, 0x11) | 0x80); } static void vgaHWUnlock(void) { /* Unprotect CRTC[0-7] */ vga_wcrt(NULL, 0x11, vga_rcrt(NULL, 0x11) & ~0x80); } static void neoLock(struct vgastate *state) { vga_wgfx(state->vgabase, 0x09, 0x00); vgaHWLock(state); } static void neoUnlock(void) { vgaHWUnlock(); vga_wgfx(NULL, 0x09, 0x26); } /* * VGA Palette management */ static int paletteEnabled = 0; static inline void VGAenablePalette(void) { vga_r(NULL, VGA_IS1_RC); vga_w(NULL, VGA_ATT_W, 0x00); 
paletteEnabled = 1; } static inline void VGAdisablePalette(void) { vga_r(NULL, VGA_IS1_RC); vga_w(NULL, VGA_ATT_W, 0x20); paletteEnabled = 0; } static inline void VGAwATTR(u8 index, u8 value) { if (paletteEnabled) index &= ~0x20; else index |= 0x20; vga_r(NULL, VGA_IS1_RC); vga_wattr(NULL, index, value); } static void vgaHWProtect(int on) { unsigned char tmp; tmp = vga_rseq(NULL, 0x01); if (on) { /* * Turn off screen and disable sequencer. */ vga_wseq(NULL, 0x00, 0x01); /* Synchronous Reset */ vga_wseq(NULL, 0x01, tmp | 0x20); /* disable the display */ VGAenablePalette(); } else { /* * Reenable sequencer, then turn on screen. */ vga_wseq(NULL, 0x01, tmp & ~0x20); /* reenable display */ vga_wseq(NULL, 0x00, 0x03); /* clear synchronousreset */ VGAdisablePalette(); } } static void vgaHWRestore(const struct fb_info *info, const struct neofb_par *par) { int i; vga_w(NULL, VGA_MIS_W, par->MiscOutReg); for (i = 1; i < 5; i++) vga_wseq(NULL, i, par->Sequencer[i]); /* Ensure CRTC registers 0-7 are unlocked by clearing bit 7 or CRTC[17] */ vga_wcrt(NULL, 17, par->CRTC[17] & ~0x80); for (i = 0; i < 25; i++) vga_wcrt(NULL, i, par->CRTC[i]); for (i = 0; i < 9; i++) vga_wgfx(NULL, i, par->Graphics[i]); VGAenablePalette(); for (i = 0; i < 21; i++) VGAwATTR(i, par->Attribute[i]); VGAdisablePalette(); } /* -------------------- Hardware specific routines ------------------------- */ /* * Hardware Acceleration for Neo2200+ */ static inline int neo2200_sync(struct fb_info *info) { struct neofb_par *par = info->par; while (readl(&par->neo2200->bltStat) & 1) cpu_relax(); return 0; } static inline void neo2200_wait_fifo(struct fb_info *info, int requested_fifo_space) { // ndev->neo.waitfifo_calls++; // ndev->neo.waitfifo_sum += requested_fifo_space; /* FIXME: does not work if (neo_fifo_space < requested_fifo_space) { neo_fifo_waitcycles++; while (1) { neo_fifo_space = (neo2200->bltStat >> 8); if (neo_fifo_space >= requested_fifo_space) break; } } else { neo_fifo_cache_hits++; } 
neo_fifo_space -= requested_fifo_space; */ neo2200_sync(info); } static inline void neo2200_accel_init(struct fb_info *info, struct fb_var_screeninfo *var) { struct neofb_par *par = info->par; Neo2200 __iomem *neo2200 = par->neo2200; u32 bltMod, pitch; neo2200_sync(info); switch (var->bits_per_pixel) { case 8: bltMod = NEO_MODE1_DEPTH8; pitch = var->xres_virtual; break; case 15: case 16: bltMod = NEO_MODE1_DEPTH16; pitch = var->xres_virtual * 2; break; case 24: bltMod = NEO_MODE1_DEPTH24; pitch = var->xres_virtual * 3; break; default: printk(KERN_ERR "neofb: neo2200_accel_init: unexpected bits per pixel!\n"); return; } writel(bltMod << 16, &neo2200->bltStat); writel((pitch << 16) | pitch, &neo2200->pitch); } /* --------------------------------------------------------------------- */ static int neofb_open(struct fb_info *info, int user) { struct neofb_par *par = info->par; if (!par->ref_count) { memset(&par->state, 0, sizeof(struct vgastate)); par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS; save_vga(&par->state); } par->ref_count++; return 0; } static int neofb_release(struct fb_info *info, int user) { struct neofb_par *par = info->par; if (!par->ref_count) return -EINVAL; if (par->ref_count == 1) { restore_vga(&par->state); } par->ref_count--; return 0; } static int neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct neofb_par *par = info->par; int memlen, vramlen; int mode_ok = 0; DBG("neofb_check_var"); if (PICOS2KHZ(var->pixclock) > par->maxClock) return -EINVAL; /* Is the mode larger than the LCD panel? */ if (par->internal_display && ((var->xres > par->NeoPanelWidth) || (var->yres > par->NeoPanelHeight))) { printk(KERN_INFO "Mode (%dx%d) larger than the LCD panel (%dx%d)\n", var->xres, var->yres, par->NeoPanelWidth, par->NeoPanelHeight); return -EINVAL; } /* Is the mode one of the acceptable sizes? 
*/ if (!par->internal_display) mode_ok = 1; else { switch (var->xres) { case 1280: if (var->yres == 1024) mode_ok = 1; break; case 1024: if (var->yres == 768) mode_ok = 1; break; case 800: if (var->yres == (par->libretto ? 480 : 600)) mode_ok = 1; break; case 640: if (var->yres == 480) mode_ok = 1; break; } } if (!mode_ok) { printk(KERN_INFO "Mode (%dx%d) won't display properly on LCD\n", var->xres, var->yres); return -EINVAL; } var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.msb_right = 0; var->transp.offset = 0; var->transp.length = 0; switch (var->bits_per_pixel) { case 8: /* PSEUDOCOLOUR, 256 */ var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; case 16: /* DIRECTCOLOUR, 64k */ var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; break; case 24: /* TRUECOLOUR, 16m */ var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; #ifdef NO_32BIT_SUPPORT_YET case 32: /* TRUECOLOUR, 16m */ var->transp.offset = 24; var->transp.length = 8; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; break; #endif default: printk(KERN_WARNING "neofb: no support for %dbpp\n", var->bits_per_pixel); return -EINVAL; } vramlen = info->fix.smem_len; if (vramlen > 4 * 1024 * 1024) vramlen = 4 * 1024 * 1024; if (var->xres_virtual < var->xres) var->xres_virtual = var->xres; memlen = var->xres_virtual * var->bits_per_pixel * var->yres_virtual >> 3; if (memlen > vramlen) { var->yres_virtual = vramlen * 8 / (var->xres_virtual * var->bits_per_pixel); memlen = var->xres_virtual * var->bits_per_pixel * var->yres_virtual / 8; } /* we must round yres/xres down, we already rounded y/xres_virtual up if it was possible. 
	   We should return -EINVAL, but I disagree */
	if (var->yres_virtual < var->yres)
		var->yres = var->yres_virtual;
	if (var->xoffset + var->xres > var->xres_virtual)
		var->xoffset = var->xres_virtual - var->xres;
	if (var->yoffset + var->yres > var->yres_virtual)
		var->yoffset = var->yres_virtual - var->yres;

	var->nonstd = 0;
	var->height = -1;
	var->width = -1;

	/* Text acceleration is unavailable at >=24bpp or without a 2200 core. */
	if (var->bits_per_pixel >= 24 || !par->neo2200)
		var->accel_flags &= ~FB_ACCELF_TEXT;
	return 0;
}

/*
 * Program the hardware for the mode in info->var: compute the generic
 * VGA register set via vgaHWInit(), fill in the NeoMagic extended
 * registers (CRT offset, panel control, centering/stretching, VCLK),
 * then write everything out and re-initialize the accelerator.
 */
static int neofb_set_par(struct fb_info *info)
{
	struct neofb_par *par = info->par;
	unsigned char temp;
	int i, clock_hi = 0;
	int lcd_stretch;
	int hoffset, voffset;
	int vsync_start, vtotal;

	DBG("neofb_set_par");

	neoUnlock();
	vgaHWProtect(1);	/* Blank the screen */

	vsync_start = info->var.yres + info->var.lower_margin;
	vtotal = vsync_start + info->var.vsync_len + info->var.upper_margin;

	/*
	 * This will allocate the datastructure and initialize all of the
	 * generic VGA registers.
	 */
	if (vgaHWInit(&info->var, par))
		return -EINVAL;

	/*
	 * The default value assigned by vgaHW.c is 0x41, but this does
	 * not work for NeoMagic.
	 */
	par->Attribute[16] = 0x01;

	/* CRTC 0x13 is the row offset (pitch); ExtCRTOffset holds its
	 * high bits.  Both scale with bytes-per-pixel. */
	switch (info->var.bits_per_pixel) {
	case 8:
		par->CRTC[0x13] = info->var.xres_virtual >> 3;
		par->ExtCRTOffset = info->var.xres_virtual >> 11;
		par->ExtColorModeSelect = 0x11;
		break;
	case 16:
		par->CRTC[0x13] = info->var.xres_virtual >> 2;
		par->ExtCRTOffset = info->var.xres_virtual >> 10;
		par->ExtColorModeSelect = 0x13;
		break;
	case 24:
		par->CRTC[0x13] = (info->var.xres_virtual * 3) >> 3;
		par->ExtCRTOffset = (info->var.xres_virtual * 3) >> 11;
		par->ExtColorModeSelect = 0x14;
		break;
#ifdef NO_32BIT_SUPPORT_YET
	case 32:		/* FIXME: guessed values */
		par->CRTC[0x13] = info->var.xres_virtual >> 1;
		par->ExtCRTOffset = info->var.xres_virtual >> 9;
		par->ExtColorModeSelect = 0x15;
		break;
#endif
	default:
		break;
	}

	par->ExtCRTDispAddr = 0x10;

	/* Vertical Extension: bit 10 of the vertical timing values. */
	par->VerticalExt = (((vtotal - 2) & 0x400) >> 10)
	    | (((info->var.yres - 1) & 0x400) >> 9)
	    | (((vsync_start) & 0x400) >> 8)
	    | (((vsync_start) & 0x400) >> 7);

	/* Fast write bursts on unless disabled. */
	if (par->pci_burst)
		par->SysIfaceCntl1 = 0x30;
	else
		par->SysIfaceCntl1 = 0x00;

	par->SysIfaceCntl2 = 0xc0;	/* VESA Bios sets this to 0x80! */

	/* Initialize: by default, we want display config register to be read */
	par->PanelDispCntlRegRead = 1;

	/* Enable any user specified display devices. */
	par->PanelDispCntlReg1 = 0x00;
	if (par->internal_display)
		par->PanelDispCntlReg1 |= 0x02;
	if (par->external_display)
		par->PanelDispCntlReg1 |= 0x01;

	/* If the user did not specify any display devices, then... */
	if (par->PanelDispCntlReg1 == 0x00) {
		/* Default to internal (i.e., LCD) only. */
		par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03;
	}

	/* If we are using a fixed mode, then tell the chip we are. */
	switch (info->var.xres) {
	case 1280:
		par->PanelDispCntlReg1 |= 0x60;
		break;
	case 1024:
		par->PanelDispCntlReg1 |= 0x40;
		break;
	case 800:
		par->PanelDispCntlReg1 |= 0x20;
		break;
	case 640:
	default:
		break;
	}

	/* Setup shadow register locking.
	 */
	switch (par->PanelDispCntlReg1 & 0x03) {
	case 0x01:		/* External CRT only mode: */
		par->GeneralLockReg = 0x00;
		/* We need to program the VCLK for external display only mode. */
		par->ProgramVCLK = 1;
		break;
	case 0x02:		/* Internal LCD only mode: */
	case 0x03:		/* Simultaneous internal/external (LCD/CRT) mode: */
		par->GeneralLockReg = 0x01;
		/* Don't program the VCLK when using the LCD. */
		par->ProgramVCLK = 0;
		break;
	}

	/*
	 * If the screen is to be stretched, turn on stretching for the
	 * various modes.
	 *
	 * OPTION_LCD_STRETCH means stretching should be turned off!
	 */
	par->PanelDispCntlReg2 = 0x00;
	par->PanelDispCntlReg3 = 0x00;

	if (par->lcd_stretch && (par->PanelDispCntlReg1 == 0x02) &&	/* LCD only */
	    (info->var.xres != par->NeoPanelWidth)) {
		switch (info->var.xres) {
		case 320:	/* Needs testing.  KEM -- 24 May 98 */
		case 400:	/* Needs testing.  KEM -- 24 May 98 */
		case 640:
		case 800:
		case 1024:
			lcd_stretch = 1;
			par->PanelDispCntlReg2 |= 0xC6;
			break;
		default:
			lcd_stretch = 0;
			/* No stretching in these modes. */
		}
	} else
		lcd_stretch = 0;

	/*
	 * If the screen is to be centered, turn on the centering for the
	 * various modes.
	 */
	par->PanelVertCenterReg1 = 0x00;
	par->PanelVertCenterReg2 = 0x00;
	par->PanelVertCenterReg3 = 0x00;
	par->PanelVertCenterReg4 = 0x00;
	par->PanelVertCenterReg5 = 0x00;
	par->PanelHorizCenterReg1 = 0x00;
	par->PanelHorizCenterReg2 = 0x00;
	par->PanelHorizCenterReg3 = 0x00;
	par->PanelHorizCenterReg4 = 0x00;
	par->PanelHorizCenterReg5 = 0x00;

	if (par->PanelDispCntlReg1 & 0x02) {
		if (info->var.xres == par->NeoPanelWidth) {
			/*
			 * No centering required when the requested display width
			 * equals the panel width.
			 */
		} else {
			par->PanelDispCntlReg2 |= 0x01;
			par->PanelDispCntlReg3 |= 0x10;

			/* Calculate the horizontal and vertical offsets. */
			if (!lcd_stretch) {
				hoffset =
				    ((par->NeoPanelWidth - info->var.xres) >> 4) - 1;
				voffset =
				    ((par->NeoPanelHeight - info->var.yres) >> 1) - 2;
			} else {
				/* Stretched modes cannot be centered.
				 */
				hoffset = 0;
				voffset = 0;
			}

			/* Each fixed mode has its own pair of centering registers. */
			switch (info->var.xres) {
			case 320:	/* Needs testing.  KEM -- 24 May 98 */
				par->PanelHorizCenterReg3 = hoffset;
				par->PanelVertCenterReg2 = voffset;
				break;
			case 400:	/* Needs testing.  KEM -- 24 May 98 */
				par->PanelHorizCenterReg4 = hoffset;
				par->PanelVertCenterReg1 = voffset;
				break;
			case 640:
				par->PanelHorizCenterReg1 = hoffset;
				par->PanelVertCenterReg3 = voffset;
				break;
			case 800:
				par->PanelHorizCenterReg2 = hoffset;
				par->PanelVertCenterReg4 = voffset;
				break;
			case 1024:
				par->PanelHorizCenterReg5 = hoffset;
				par->PanelVertCenterReg5 = voffset;
				break;
			case 1280:
			default:
				/* No centering in these modes. */
				break;
			}
		}
	}

	par->biosMode =
	    neoFindMode(info->var.xres, info->var.yres, info->var.bits_per_pixel);

	/*
	 * Calculate the VCLK that most closely matches the requested dot
	 * clock.
	 */
	neoCalcVCLK(info, par, PICOS2KHZ(info->var.pixclock));

	/* Since we program the clocks ourselves, always use VCLK3. */
	par->MiscOutReg |= 0x0C;

	/* alread unlocked above */
	/* BOGUS vga_wgfx(NULL, 0x09, 0x26); */

	/* don't know what this is, but it's 0 from bootup anyway */
	vga_wgfx(NULL, 0x15, 0x00);

	/* was set to 0x01 by my bios in text and vesa modes */
	vga_wgfx(NULL, 0x0A, par->GeneralLockReg);

	/*
	 * The color mode needs to be set before calling vgaHWRestore
	 * to ensure the DAC is initialized properly.
	 *
	 * NOTE: Make sure we don't change bits make sure we don't change
	 * any reserved bits.
	 */
	/* Read-modify-write GR90: preserve the chip's reserved bits,
	 * merge in the color mode selected for this depth. */
	temp = vga_rgfx(NULL, 0x90);
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
		temp &= 0xF0;	/* Save bits 7:4 */
		temp |= (par->ExtColorModeSelect & ~0xF0);
		break;
	case FB_ACCEL_NEOMAGIC_NM2090:
	case FB_ACCEL_NEOMAGIC_NM2093:
	case FB_ACCEL_NEOMAGIC_NM2097:
	case FB_ACCEL_NEOMAGIC_NM2160:
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		temp &= 0x70;	/* Save bits 6:4 */
		temp |= (par->ExtColorModeSelect & ~0x70);
		break;
	}

	vga_wgfx(NULL, 0x90, temp);

	/*
	 * In some rare cases a lockup might occur if we don't delay
	 * here. (Reported by Miles Lane)
	 */
	//mdelay(200);

	/*
	 * Disable horizontal and vertical graphics and text expansions so
	 * that vgaHWRestore works properly.
	 */
	temp = vga_rgfx(NULL, 0x25);
	temp &= 0x39;
	vga_wgfx(NULL, 0x25, temp);

	/*
	 * Sleep for 200ms to make sure that the two operations above have
	 * had time to take effect.
	 */
	mdelay(200);

	/*
	 * This function handles restoring the generic VGA registers.
	 */
	vgaHWRestore(info, par);

	/* linear colormap for non palettized modes */
	switch (info->var.bits_per_pixel) {
	case 8:
		/* PseudoColor, 256 */
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
		break;
	case 16:
		/* TrueColor, 64k: program a linear ramp into the DAC
		 * (6-bit green doubled relative to 5-bit red/blue). */
		info->fix.visual = FB_VISUAL_TRUECOLOR;

		for (i = 0; i < 64; i++) {
			outb(i, 0x3c8);

			outb(i << 1, 0x3c9);
			outb(i, 0x3c9);
			outb(i << 1, 0x3c9);
		}
		break;
	case 24:
#ifdef NO_32BIT_SUPPORT_YET
	case 32:
#endif
		/* TrueColor, 16m: identity DAC ramp. */
		info->fix.visual = FB_VISUAL_TRUECOLOR;

		for (i = 0; i < 256; i++) {
			outb(i, 0x3c8);

			outb(i, 0x3c9);
			outb(i, 0x3c9);
			outb(i, 0x3c9);
		}
		break;
	}

	vga_wgfx(NULL, 0x0E, par->ExtCRTDispAddr);
	vga_wgfx(NULL, 0x0F, par->ExtCRTOffset);
	temp = vga_rgfx(NULL, 0x10);
	temp &= 0x0F;		/* Save bits 3:0 */
	temp |= (par->SysIfaceCntl1 & ~0x0F);	/* VESA Bios sets bit 1!
						 */
	vga_wgfx(NULL, 0x10, temp);

	vga_wgfx(NULL, 0x11, par->SysIfaceCntl2);
	vga_wgfx(NULL, 0x15, 0 /*par->SingleAddrPage */ );
	vga_wgfx(NULL, 0x16, 0 /*par->DualAddrPage */ );

	/* GR20 panel control: the writable bit mask differs per chip. */
	temp = vga_rgfx(NULL, 0x20);
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
		temp &= 0xFC;	/* Save bits 7:2 */
		temp |= (par->PanelDispCntlReg1 & ~0xFC);
		break;
	case FB_ACCEL_NEOMAGIC_NM2090:
	case FB_ACCEL_NEOMAGIC_NM2093:
	case FB_ACCEL_NEOMAGIC_NM2097:
	case FB_ACCEL_NEOMAGIC_NM2160:
		temp &= 0xDC;	/* Save bits 7:6,4:2 */
		temp |= (par->PanelDispCntlReg1 & ~0xDC);
		break;
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		temp &= 0x98;	/* Save bits 7,4:3 */
		temp |= (par->PanelDispCntlReg1 & ~0x98);
		break;
	}
	vga_wgfx(NULL, 0x20, temp);

	temp = vga_rgfx(NULL, 0x25);
	temp &= 0x38;		/* Save bits 5:3 */
	temp |= (par->PanelDispCntlReg2 & ~0x38);
	vga_wgfx(NULL, 0x25, temp);

	if (info->fix.accel != FB_ACCEL_NEOMAGIC_NM2070) {
		temp = vga_rgfx(NULL, 0x30);
		temp &= 0xEF;	/* Save bits 7:5 and bits 3:0 */
		temp |= (par->PanelDispCntlReg3 & ~0xEF);
		vga_wgfx(NULL, 0x30, temp);
	}

	vga_wgfx(NULL, 0x28, par->PanelVertCenterReg1);
	vga_wgfx(NULL, 0x29, par->PanelVertCenterReg2);
	vga_wgfx(NULL, 0x2a, par->PanelVertCenterReg3);

	if (info->fix.accel != FB_ACCEL_NEOMAGIC_NM2070) {
		/* Not valid on the NM2070. */
		vga_wgfx(NULL, 0x32, par->PanelVertCenterReg4);
		vga_wgfx(NULL, 0x33, par->PanelHorizCenterReg1);
		vga_wgfx(NULL, 0x34, par->PanelHorizCenterReg2);
		vga_wgfx(NULL, 0x35, par->PanelHorizCenterReg3);
	}

	if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2160)
		vga_wgfx(NULL, 0x36, par->PanelHorizCenterReg4);

	if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) {
		vga_wgfx(NULL, 0x36, par->PanelHorizCenterReg4);
		vga_wgfx(NULL, 0x37, par->PanelVertCenterReg5);
		vga_wgfx(NULL, 0x38, par->PanelHorizCenterReg5);
		/* the 2200-class chips have a high byte for the VCLK numerator */
		clock_hi = 1;
	}

	/* Program VCLK3 if needed.
	 */
	/* Only rewrite the VCLK3 registers when they actually differ from
	 * the requested clock, to avoid a needless PLL reprogram. */
	if (par->ProgramVCLK && ((vga_rgfx(NULL, 0x9B) != par->VCLK3NumeratorLow)
				 || (vga_rgfx(NULL, 0x9F) != par->VCLK3Denominator)
				 || (clock_hi && ((vga_rgfx(NULL, 0x8F) & ~0x0f)
						  != (par->VCLK3NumeratorHigh & ~0x0F))))) {
		vga_wgfx(NULL, 0x9B, par->VCLK3NumeratorLow);
		if (clock_hi) {
			temp = vga_rgfx(NULL, 0x8F);
			temp &= 0x0F;	/* Save bits 3:0 */
			temp |= (par->VCLK3NumeratorHigh & ~0x0F);
			vga_wgfx(NULL, 0x8F, temp);
		}
		vga_wgfx(NULL, 0x9F, par->VCLK3Denominator);
	}

	if (par->biosMode)
		vga_wcrt(NULL, 0x23, par->biosMode);

	vga_wgfx(NULL, 0x93, 0xc0);	/* Gives 5x faster framebuffer writes !!! */

	/* Program vertical extension register */
	if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 ||
	    info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) {
		vga_wcrt(NULL, 0x70, par->VerticalExt);
	}

	vgaHWProtect(0);	/* Turn on screen */

	/* Calling this also locks offset registers required in update_start */
	neoLock(&par->state);

	info->fix.line_length =
	    info->var.xres_virtual * (info->var.bits_per_pixel >> 3);

	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		neo2200_accel_init(info, &info->var);
		break;
	default:
		break;
	}
	return 0;
}

/*
 *    Pan or Wrap the Display
 */
static int neofb_pan_display(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	struct neofb_par *par = info->par;
	struct vgastate *state = &par->state;
	int oldExtCRTDispAddr;
	int Base;

	DBG("neofb_update_start");

	/* Byte address of the new top-left visible pixel. */
	Base = (var->yoffset * info->var.xres_virtual + var->xoffset) >> 2;
	Base *= (info->var.bits_per_pixel + 7) / 8;

	neoUnlock();

	/*
	 * These are the generic starting address registers.
	 */
	vga_wcrt(state->vgabase, 0x0C, (Base & 0x00FF00) >> 8);
	vga_wcrt(state->vgabase, 0x0D, (Base & 0x00FF));

	/*
	 * Make sure we don't clobber some other bits that might already
	 * have been set. NOTE: NM2200 has a writable bit 3, but it shouldn't
	 * be needed.
	 */
	oldExtCRTDispAddr = vga_rgfx(NULL, 0x0E);
	vga_wgfx(state->vgabase, 0x0E,
		 (((Base >> 16) & 0x0f) | (oldExtCRTDispAddr & 0xf0)));

	neoLock(state);

	return 0;
}

/*
 * Set one palette/pseudo-palette entry.  At <=8bpp the hardware DAC is
 * programmed directly; at higher depths the first 16 entries are packed
 * into the software pseudo-palette used by the drawing ops.
 */
static int neofb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			   u_int transp, struct fb_info *fb)
{
	if (regno >= fb->cmap.len || regno > 255)
		return -EINVAL;

	if (fb->var.bits_per_pixel <= 8) {
		/* fbdev color components are 16-bit; DAC wants 6-bit. */
		outb(regno, 0x3c8);

		outb(red >> 10, 0x3c9);
		outb(green >> 10, 0x3c9);
		outb(blue >> 10, 0x3c9);
	} else if (regno < 16) {
		switch (fb->var.bits_per_pixel) {
		case 16:
			((u32 *) fb->pseudo_palette)[regno] =
			    ((red & 0xf800)) | ((green & 0xfc00) >> 5) |
			    ((blue & 0xf800) >> 11);
			break;
		case 24:
			((u32 *) fb->pseudo_palette)[regno] =
			    ((red & 0xff00) << 8) | ((green & 0xff00)) |
			    ((blue & 0xff00) >> 8);
			break;
#ifdef NO_32BIT_SUPPORT_YET
		case 32:
			((u32 *) fb->pseudo_palette)[regno] =
			    ((transp & 0xff00) << 16) | ((red & 0xff00) << 8) |
			    ((green & 0xff00)) | ((blue & 0xff00) >> 8);
			break;
#endif
		default:
			return 1;
		}
	}

	return 0;
}

/*
 *    (Un)Blank the display.
 */
static int neofb_blank(int blank_mode, struct fb_info *info)
{
	/*
	 *  Blank the screen if blank_mode != 0, else unblank. Return 0 if
	 *  blanking succeeded, != 0 if un-/blanking failed due to e.g. a
	 *  video mode which doesn't support it. Implements VESA suspend
	 *  and powerdown modes for monitors, and backlight control on LCDs.
	 *  blank_mode == 0: unblanked (backlight on)
	 *  blank_mode == 1: blank (backlight on)
	 *  blank_mode == 2: suspend vsync (backlight off)
	 *  blank_mode == 3: suspend hsync (backlight off)
	 *  blank_mode == 4: powerdown (backlight off)
	 *
	 *  wms...Enable VESA DPMS compatible powerdown mode
	 *  run "setterm -powersave powerdown" to take advantage
	 */
	struct neofb_par *par = info->par;
	int seqflags, lcdflags, dpmsflags, reg, tmpdisp;

	/*
	 * Read back the register bits related to display configuration. They might
	 * have been changed underneath the driver via Fn key stroke.
	 */
	neoUnlock();
	tmpdisp = vga_rgfx(NULL, 0x20) & 0x03;
	neoLock(&par->state);

	/* In case we blank the screen, we want to store the possibly new
	 * configuration in the driver. During un-blank, we re-apply this setting,
	 * since the LCD bit will be cleared in order to switch off the backlight.
	 */
	if (par->PanelDispCntlRegRead) {
		par->PanelDispCntlReg1 = tmpdisp;
	}
	par->PanelDispCntlRegRead = !blank_mode;

	/* Decide sequencer / LCD / DPMS bits for the requested blank level. */
	switch (blank_mode) {
	case FB_BLANK_POWERDOWN:	/* powerdown - both sync lines down */
		seqflags = VGA_SR01_SCREEN_OFF;	/* Disable sequencer */
		lcdflags = 0;			/* LCD off */
		dpmsflags = NEO_GR01_SUPPRESS_HSYNC |
			    NEO_GR01_SUPPRESS_VSYNC;
#ifdef CONFIG_TOSHIBA
		/* Do we still need this ? */
		/* attempt to turn off backlight on toshiba; also turns off external */
		{
			SMMRegisters regs;

			regs.eax = 0xff00; /* HCI_SET */
			regs.ebx = 0x0002; /* HCI_BACKLIGHT */
			regs.ecx = 0x0000; /* HCI_DISABLE */
			tosh_smm(&regs);
		}
#endif
		break;
	case FB_BLANK_HSYNC_SUSPEND:		/* hsync off */
		seqflags = VGA_SR01_SCREEN_OFF;	/* Disable sequencer */
		lcdflags = 0;			/* LCD off */
		dpmsflags = NEO_GR01_SUPPRESS_HSYNC;
		break;
	case FB_BLANK_VSYNC_SUSPEND:		/* vsync off */
		seqflags = VGA_SR01_SCREEN_OFF;	/* Disable sequencer */
		lcdflags = 0;			/* LCD off */
		dpmsflags = NEO_GR01_SUPPRESS_VSYNC;
		break;
	case FB_BLANK_NORMAL:	/* just blank screen (backlight stays on) */
		seqflags = VGA_SR01_SCREEN_OFF;	/* Disable sequencer */

		/*
		 * During a blank operation with the LID shut, we might store "LCD off"
		 * by mistake. Due to timing issues, the BIOS may switch the lights
		 * back on, and we turn it back off once we "unblank".
		 *
		 * So here is an attempt to implement ">=" - if we are in the process
		 * of unblanking, and the LCD bit is unset in the driver but set in the
		 * register, we must keep it.
		 */
		lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
		dpmsflags = 0x00;	/* no hsync/vsync suppression */
		break;
	case FB_BLANK_UNBLANK:		/* unblank */
		seqflags = 0;			/* Enable sequencer */
		lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
		dpmsflags = 0x00;	/* no hsync/vsync suppression */
#ifdef CONFIG_TOSHIBA
		/* Do we still need this ? */
		/* attempt to re-enable backlight/external on toshiba */
		{
			SMMRegisters regs;

			regs.eax = 0xff00; /* HCI_SET */
			regs.ebx = 0x0002; /* HCI_BACKLIGHT */
			regs.ecx = 0x0001; /* HCI_ENABLE */
			tosh_smm(&regs);
		}
#endif
		break;
	default:	/* Anything else we don't understand; return 1 to tell
			 * fb_blank we didn't actually do anything */
		return 1;
	}

	/* Apply the three register updates under the extended-register unlock. */
	neoUnlock();
	reg = (vga_rseq(NULL, 0x01) & ~0x20) | seqflags;
	vga_wseq(NULL, 0x01, reg);
	reg = (vga_rgfx(NULL, 0x20) & ~0x02) | lcdflags;
	vga_wgfx(NULL, 0x20, reg);
	reg = (vga_rgfx(NULL, 0x01) & ~0xF0) | 0x80 | dpmsflags;
	vga_wgfx(NULL, 0x01, reg);
	neoLock(&par->state);
	return 0;
}

/*
 * Accelerated solid fill on NM2200-class hardware.
 */
static void
neo2200_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct neofb_par *par = info->par;
	u_long dst, rop;

	dst = rect->dx + rect->dy * info->var.xres_virtual;
	/* ROP_XOR vs ROP_COPY blit-control codes. */
	rop = rect->rop ?
	    0x060000 : 0x0c0000;

	neo2200_wait_fifo(info, 4);

	/* set blt control */
	writel(NEO_BC3_FIFO_EN |
	       NEO_BC0_SRC_IS_FG | NEO_BC3_SKIP_MAPPING |
	       //               NEO_BC3_DST_XY_ADDR |
	       //               NEO_BC3_SRC_XY_ADDR |
	       rop, &par->neo2200->bltCntl);

	switch (info->var.bits_per_pixel) {
	case 8:
		writel(rect->color, &par->neo2200->fgColor);
		break;
	case 16:
	case 24:
		/* rect->color is an index into the software pseudo-palette. */
		writel(((u32 *) (info->pseudo_palette))[rect->color],
		       &par->neo2200->fgColor);
		break;
	}

	writel(dst * ((info->var.bits_per_pixel + 7) >> 3),
	       &par->neo2200->dstStart);
	writel((rect->height << 16) | (rect->width & 0xffff),
	       &par->neo2200->xyExt);
}

/*
 * Accelerated screen-to-screen copy on NM2200-class hardware.
 * Copies bottom-up / right-to-left when source and destination overlap
 * in the forward direction.
 */
static void
neo2200_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	u32 sx = area->sx, sy = area->sy, dx = area->dx, dy = area->dy;
	struct neofb_par *par = info->par;
	u_long src, dst, bltCntl;

	bltCntl = NEO_BC3_FIFO_EN | NEO_BC3_SKIP_MAPPING | 0x0C0000;

	if ((dy > sy) || ((dy == sy) && (dx > sx))) {
		/* Start with the lower right corner */
		sy += (area->height - 1);
		dy += (area->height - 1);
		sx += (area->width - 1);
		dx += (area->width - 1);

		bltCntl |= NEO_BC0_X_DEC | NEO_BC0_DST_Y_DEC | NEO_BC0_SRC_Y_DEC;
	}

	src = sx * (info->var.bits_per_pixel >> 3) + sy * info->fix.line_length;
	dst = dx * (info->var.bits_per_pixel >> 3) + dy * info->fix.line_length;

	neo2200_wait_fifo(info, 4);

	/* set blt control */
	writel(bltCntl, &par->neo2200->bltCntl);

	writel(src, &par->neo2200->srcStart);
	writel(dst, &par->neo2200->dstStart);
	writel((area->height << 16) | (area->width & 0xffff),
	       &par->neo2200->xyExt);
}

/*
 * Accelerated image transfer on NM2200-class hardware: the (padded)
 * image data is streamed through an MMIO window at offset 0x100000.
 * Falls back to cfb_imageblit() for cases the engine cannot handle.
 */
static void
neo2200_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct neofb_par *par = info->par;
	int s_pitch = (image->width * image->depth + 7) >> 3;
	int scan_align = info->pixmap.scan_align - 1;
	int buf_align = info->pixmap.buf_align - 1;
	int bltCntl_flags, d_pitch, data_len;

	// The data is padded for the hardware
	d_pitch = (s_pitch + scan_align) & ~scan_align;
	data_len = ((d_pitch * image->height) + buf_align) & ~buf_align;

	neo2200_sync(info);

	if (image->depth == 1) {
		if (info->var.bits_per_pixel == 24 && image->width < 16) {
			/* FIXME. There is a bug with accelerated color-expanded
			 * transfers in 24 bit mode if the image being transferred
			 * is less than 16 bits wide. This is due to insufficient
			 * padding when writing the image. We need to adjust
			 * struct fb_pixmap. Not yet done. */
			cfb_imageblit(info, image);
			return;
		}
		bltCntl_flags = NEO_BC0_SRC_MONO;
	} else if (image->depth == info->var.bits_per_pixel) {
		bltCntl_flags = 0;
	} else {
		/* We don't currently support hardware acceleration if image
		 * depth is different from display */
		cfb_imageblit(info, image);
		return;
	}

	switch (info->var.bits_per_pixel) {
	case 8:
		writel(image->fg_color, &par->neo2200->fgColor);
		writel(image->bg_color, &par->neo2200->bgColor);
		break;
	case 16:
	case 24:
		writel(((u32 *) (info->pseudo_palette))[image->fg_color],
		       &par->neo2200->fgColor);
		writel(((u32 *) (info->pseudo_palette))[image->bg_color],
		       &par->neo2200->bgColor);
		break;
	}

	writel(NEO_BC0_SYS_TO_VID |
	       NEO_BC3_SKIP_MAPPING | bltCntl_flags |
	       // NEO_BC3_DST_XY_ADDR |
	       0x0c0000, &par->neo2200->bltCntl);

	writel(0, &par->neo2200->srcStart);
	// par->neo2200->dstStart = (image->dy << 16) | (image->dx & 0xffff);
	writel(((image->dx & 0xffff) * (info->var.bits_per_pixel >> 3) +
		image->dy * info->fix.line_length), &par->neo2200->dstStart);
	writel((image->height << 16) | (image->width & 0xffff),
	       &par->neo2200->xyExt);

	/* Stream the pixel data through the blit window. */
	memcpy_toio(par->mmio_vbase + 0x100000, image->data, data_len);
}

/* Dispatch fillrect to the accelerator on 2200-class chips, else cfb. */
static void
neofb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		neo2200_fillrect(info, rect);
		break;
	default:
		cfb_fillrect(info, rect);
		break;
	}
}

/* Dispatch copyarea to the accelerator on 2200-class chips, else cfb. */
static void
neofb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case
	    FB_ACCEL_NEOMAGIC_NM2380:
		neo2200_copyarea(info, area);
		break;
	default:
		cfb_copyarea(info, area);
		break;
	}
}

/* Dispatch imageblit to the accelerator on 2200-class chips, else cfb. */
static void
neofb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		neo2200_imageblit(info, image);
		break;
	default:
		cfb_imageblit(info, image);
		break;
	}
}

/* Wait for the accelerator to go idle (no-op on unaccelerated chips). */
static int
neofb_sync(struct fb_info *info)
{
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		neo2200_sync(info);
		break;
	default:
		break;
	}
	return 0;
}

/* Hardware-cursor support, disabled/unfinished - kept for reference.
static void
neofb_draw_cursor(struct fb_info *info, u8 *dst, u8 *src, unsigned int width)
{
	//memset_io(info->sprite.addr, 0xff, 1);
}

static int
neofb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	struct neofb_par *par = (struct neofb_par *) info->par;

	* Disable cursor *
	write_le32(NEOREG_CURSCNTL, ~NEO_CURS_ENABLE, par);

	if (cursor->set & FB_CUR_SETPOS) {
		u32 x = cursor->image.dx;
		u32 y = cursor->image.dy;

		info->cursor.image.dx = x;
		info->cursor.image.dy = y;
		write_le32(NEOREG_CURSX, x, par);
		write_le32(NEOREG_CURSY, y, par);
	}

	if (cursor->set & FB_CUR_SETSIZE) {
		info->cursor.image.height = cursor->image.height;
		info->cursor.image.width = cursor->image.width;
	}

	if (cursor->set & FB_CUR_SETHOT)
		info->cursor.hot = cursor->hot;

	if (cursor->set & FB_CUR_SETCMAP) {
		if (cursor->image.depth == 1) {
			u32 fg = cursor->image.fg_color;
			u32 bg = cursor->image.bg_color;

			info->cursor.image.fg_color = fg;
			info->cursor.image.bg_color = bg;

			fg = ((fg & 0xff0000) >> 16) | ((fg & 0xff) << 16) | (fg & 0xff00);
			bg = ((bg & 0xff0000) >> 16) | ((bg & 0xff) << 16) | (bg & 0xff00);

			write_le32(NEOREG_CURSFGCOLOR, fg, par);
			write_le32(NEOREG_CURSBGCOLOR, bg, par);
		}
	}

	if (cursor->set & FB_CUR_SETSHAPE)
		fb_load_cursor_image(info);

	if (info->cursor.enable)
		write_le32(NEOREG_CURSCNTL, NEO_CURS_ENABLE, par);
	return 0;
}
*/

/* fbdev entry points for this driver. */
static struct fb_ops neofb_ops = {
	.owner = THIS_MODULE,
	.fb_open = neofb_open,
	.fb_release = neofb_release,
	.fb_check_var = neofb_check_var,
	.fb_set_par = neofb_set_par,
	.fb_setcolreg = neofb_setcolreg,
	.fb_pan_display = neofb_pan_display,
	.fb_blank = neofb_blank,
	.fb_sync = neofb_sync,
	.fb_fillrect = neofb_fillrect,
	.fb_copyarea = neofb_copyarea,
	.fb_imageblit = neofb_imageblit,
};

/* --------------------------------------------------------------------- */

/* 800x480 panel timing used by Libretto laptops (no VESA equivalent). */
static struct fb_videomode mode800x480 = {
	.xres = 800,
	.yres = 480,
	.pixclock = 25000,
	.left_margin = 88,
	.right_margin = 40,
	.upper_margin = 23,
	.lower_margin = 1,
	.hsync_len = 128,
	.vsync_len = 4,
	.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	.vmode = FB_VMODE_NONINTERLACED
};

/*
 * Locate, reserve and map the chip's MMIO register window.  The window
 * lives at different offsets of different PCI BARs depending on the
 * chip generation.  Returns 0 or a negative errno.
 */
static int neo_map_mmio(struct fb_info *info, struct pci_dev *dev)
{
	struct neofb_par *par = info->par;

	DBG("neo_map_mmio");

	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
		info->fix.mmio_start = pci_resource_start(dev, 0) + 0x100000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2090:
	case FB_ACCEL_NEOMAGIC_NM2093:
		info->fix.mmio_start = pci_resource_start(dev, 0) + 0x200000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2160:
	case FB_ACCEL_NEOMAGIC_NM2097:
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		info->fix.mmio_start = pci_resource_start(dev, 1);
		break;
	default:
		info->fix.mmio_start = pci_resource_start(dev, 0);
	}
	info->fix.mmio_len = MMIO_SIZE;

	if (!request_mem_region(info->fix.mmio_start, MMIO_SIZE,
				"memory mapped I/O")) {
		printk("neofb: memory mapped IO in use\n");
		return -EBUSY;
	}

	par->mmio_vbase = ioremap(info->fix.mmio_start, MMIO_SIZE);
	if (!par->mmio_vbase) {
		printk("neofb: unable to map memory mapped IO\n");
		release_mem_region(info->fix.mmio_start,
				   info->fix.mmio_len);
		return -ENOMEM;
	} else
		printk(KERN_INFO "neofb: mapped io at %p\n",
		       par->mmio_vbase);

	return 0;
}

/* Undo neo_map_mmio(): unmap and release the register window. */
static void neo_unmap_mmio(struct fb_info *info)
{
	struct neofb_par *par =
	    info->par;

	DBG("neo_unmap_mmio");

	iounmap(par->mmio_vbase);
	par->mmio_vbase = NULL;

	release_mem_region(info->fix.mmio_start,
			   info->fix.mmio_len);
}

/*
 * Reserve and map video_len bytes of framebuffer memory from PCI BAR 0,
 * optionally enable write-combining via MTRR, and clear the screen.
 * Returns 0 or a negative errno.
 */
static int neo_map_video(struct fb_info *info, struct pci_dev *dev,
			 int video_len)
{
	//unsigned long addr;

	DBG("neo_map_video");

	info->fix.smem_start = pci_resource_start(dev, 0);
	info->fix.smem_len = video_len;

	if (!request_mem_region(info->fix.smem_start, info->fix.smem_len,
				"frame buffer")) {
		printk("neofb: frame buffer in use\n");
		return -EBUSY;
	}

	info->screen_base =
	    ioremap(info->fix.smem_start, info->fix.smem_len);
	if (!info->screen_base) {
		printk("neofb: unable to map screen memory\n");
		release_mem_region(info->fix.smem_start,
				   info->fix.smem_len);
		return -ENOMEM;
	} else
		printk(KERN_INFO "neofb: mapped framebuffer at %p\n",
		       info->screen_base);

#ifdef CONFIG_MTRR
	((struct neofb_par *)(info->par))->mtrr =
	    mtrr_add(info->fix.smem_start, pci_resource_len(dev, 0),
		     MTRR_TYPE_WRCOMB, 1);
#endif

	/* Clear framebuffer, it's all white in memory after boot */
	memset_io(info->screen_base, 0, info->fix.smem_len);

	/* Allocate Cursor drawing pad.
	info->fix.smem_len -= PAGE_SIZE;
	addr = info->fix.smem_start + info->fix.smem_len;
	write_le32(NEOREG_CURSMEMPOS, ((0x000f & (addr >> 10)) << 8) |
					((0x0ff0 & (addr >> 10)) >> 4), par);
	addr = (unsigned long) info->screen_base + info->fix.smem_len;
	info->sprite.addr = (u8 *) addr; */

	return 0;
}

/* Undo neo_map_video(): drop the MTRR, unmap and release the memory. */
static void neo_unmap_video(struct fb_info *info)
{
	DBG("neo_unmap_video");

#ifdef CONFIG_MTRR
	{
		struct neofb_par *par = info->par;

		mtrr_del(par->mtrr, info->fix.smem_start,
			 info->fix.smem_len);
	}
#endif
	iounmap(info->screen_base);
	info->screen_base = NULL;

	release_mem_region(info->fix.smem_start,
			   info->fix.smem_len);
}

/*
 * Detect which displays are attached and the size/type of the LCD panel
 * by reading the NeoMagic extended graphics registers, and build a
 * one-entry mode database matching the panel.  Returns 0, -ENOMEM, or
 * -1 for an unsupported panel.
 */
static int neo_scan_monitor(struct fb_info *info)
{
	struct neofb_par *par = info->par;
	unsigned char type, display;
	int w;

	// Eventually we will have i2c support.
	info->monspecs.modedb = kmalloc(sizeof(struct fb_videomode), GFP_KERNEL);
	if (!info->monspecs.modedb)
		return -ENOMEM;
	info->monspecs.modedb_len = 1;

	/* Determine the panel type */
	vga_wgfx(NULL, 0x09, 0x26);
	type = vga_rgfx(NULL, 0x21);
	display = vga_rgfx(NULL, 0x20);

	if (!par->internal_display && !par->external_display) {
		/* Neither was forced on the command line: autodetect. */
		par->internal_display = display & 2 || !(display & 3) ? 1 : 0;
		par->external_display = display & 1;
		printk(KERN_INFO "Autodetected %s display\n",
		       par->internal_display && par->external_display ?
		       "simultaneous" :
		       par->internal_display ? "internal" : "external");
	}

	/* Determine panel width -- used in NeoValidMode. */
	w = vga_rgfx(NULL, 0x20);
	vga_wgfx(NULL, 0x09, 0x00);
	switch ((w & 0x18) >> 3) {
	case 0x00:
		// 640x480@60
		par->NeoPanelWidth = 640;
		par->NeoPanelHeight = 480;
		memcpy(info->monspecs.modedb, &vesa_modes[3],
		       sizeof(struct fb_videomode));
		break;
	case 0x01:
		par->NeoPanelWidth = 800;
		if (par->libretto) {
			par->NeoPanelHeight = 480;
			memcpy(info->monspecs.modedb, &mode800x480,
			       sizeof(struct fb_videomode));
		} else {
			// 800x600@60
			par->NeoPanelHeight = 600;
			memcpy(info->monspecs.modedb, &vesa_modes[8],
			       sizeof(struct fb_videomode));
		}
		break;
	case 0x02:
		// 1024x768@60
		par->NeoPanelWidth = 1024;
		par->NeoPanelHeight = 768;
		memcpy(info->monspecs.modedb, &vesa_modes[13],
		       sizeof(struct fb_videomode));
		break;
	case 0x03:
		/* 1280x1024@60 panel support needs to be added */
#ifdef NOT_DONE
		par->NeoPanelWidth = 1280;
		par->NeoPanelHeight = 1024;
		memcpy(info->monspecs.modedb, &vesa_modes[20],
		       sizeof(struct fb_videomode));
		break;
#else
		printk(KERN_ERR
		       "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n");
		return -1;
#endif
	default:
		// 640x480@60
		par->NeoPanelWidth = 640;
		par->NeoPanelHeight = 480;
		memcpy(info->monspecs.modedb, &vesa_modes[3],
		       sizeof(struct fb_videomode));
		break;
	}

	printk(KERN_INFO "Panel is a %dx%d %s %s display\n",
	       par->NeoPanelWidth, par->NeoPanelHeight,
	       (type & 0x02) ? "color" : "monochrome",
	       (type & 0x10) ?
	       "TFT" : "dual scan");
	return 0;
}

/*
 * Per-chip hardware limits: VRAM size, maximum pixel clock and cursor
 * memory layout.  Also hooks up the 2200-class accelerator registers.
 * Returns the amount of video RAM in bytes.
 */
static int neo_init_hw(struct fb_info *info)
{
	struct neofb_par *par = info->par;
	int videoRam = 896;
	int maxClock = 65000;
	int CursorMem = 1024;
	int CursorOff = 0x100;

	DBG("neo_init_hw");

	neoUnlock();

#if 0
	printk(KERN_DEBUG "--- Neo extended register dump ---\n");
	for (int w = 0; w < 0x85; w++)
		printk(KERN_DEBUG "CR %p: %p\n", (void *) w,
		       (void *) vga_rcrt(NULL, w));
	for (int w = 0; w < 0xC7; w++)
		printk(KERN_DEBUG "GR %p: %p\n", (void *) w,
		       (void *) vga_rgfx(NULL, w));
#endif
	/* VRAM (in kB) and max dot clock (in kHz) per chip generation. */
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
		videoRam = 896;
		maxClock = 65000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2090:
	case FB_ACCEL_NEOMAGIC_NM2093:
	case FB_ACCEL_NEOMAGIC_NM2097:
		videoRam = 1152;
		maxClock = 80000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2160:
		videoRam = 2048;
		maxClock = 90000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2200:
		videoRam = 2560;
		maxClock = 110000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2230:
		videoRam = 3008;
		maxClock = 110000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2360:
		videoRam = 4096;
		maxClock = 110000;
		break;
	case FB_ACCEL_NEOMAGIC_NM2380:
		videoRam = 6144;
		maxClock = 110000;
		break;
	}
	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
	case FB_ACCEL_NEOMAGIC_NM2090:
	case FB_ACCEL_NEOMAGIC_NM2093:
		CursorMem = 2048;
		CursorOff = 0x100;
		break;
	case FB_ACCEL_NEOMAGIC_NM2097:
	case FB_ACCEL_NEOMAGIC_NM2160:
		CursorMem = 1024;
		CursorOff = 0x100;
		break;
	case FB_ACCEL_NEOMAGIC_NM2200:
	case FB_ACCEL_NEOMAGIC_NM2230:
	case FB_ACCEL_NEOMAGIC_NM2360:
	case FB_ACCEL_NEOMAGIC_NM2380:
		CursorMem = 1024;
		CursorOff = 0x1000;

		/* The blitter register block is at the start of MMIO. */
		par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
		break;
	}
/*
	info->sprite.size = CursorMem;
	info->sprite.scan_align = 1;
	info->sprite.buf_align = 1;
	info->sprite.flags = FB_PIXMAP_IO;
	info->sprite.outbuf = neofb_draw_cursor;
*/
	par->maxClock = maxClock;
	par->cursorOff = CursorOff;
	return videoRam * 1024;
}

/*
 * Allocate the fb_info + neofb_par pair, apply module parameters and
 * fill in the chip-specific identification and acceleration flags.
 * Returns NULL on allocation failure.
 */
static struct fb_info *neo_alloc_fb_info(struct pci_dev *dev,
					 const struct pci_device_id *id)
{
	struct fb_info *info;
	struct neofb_par *par;

	info = framebuffer_alloc(sizeof(struct neofb_par), &dev->dev);

	if (!info)
		return NULL;

	par = info->par;

	info->fix.accel = id->driver_data;

	/* Module parameters (negated where the option disables a feature). */
	par->pci_burst = !nopciburst;
	par->lcd_stretch = !nostretch;
	par->libretto = libretto;

	par->internal_display = internal;
	par->external_display = external;
	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;

	switch (info->fix.accel) {
	case FB_ACCEL_NEOMAGIC_NM2070:
		snprintf(info->fix.id, sizeof(info->fix.id),
				"MagicGraph 128");
		break;
	case FB_ACCEL_NEOMAGIC_NM2090:
		snprintf(info->fix.id, sizeof(info->fix.id),
				"MagicGraph 128V");
		break;
	case FB_ACCEL_NEOMAGIC_NM2093:
		snprintf(info->fix.id, sizeof(info->fix.id),
				"MagicGraph 128ZV");
		break;
	case FB_ACCEL_NEOMAGIC_NM2097:
		snprintf(info->fix.id, sizeof(info->fix.id),
				"MagicGraph 128ZV+");
		break;
	case FB_ACCEL_NEOMAGIC_NM2160:
		snprintf(info->fix.id, sizeof(info->fix.id),
				"MagicGraph 128XD");
		break;
	case FB_ACCEL_NEOMAGIC_NM2200:
		snprintf(info->fix.id, sizeof(info->fix.id),
				"MagicGraph 256AV");
		info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
		               FBINFO_HWACCEL_COPYAREA |
		               FBINFO_HWACCEL_FILLRECT;
		break;
	case FB_ACCEL_NEOMAGIC_NM2230:
		snprintf(info->fix.id, sizeof(info->fix.id),
				"MagicGraph 256AV+");
		info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
		               FBINFO_HWACCEL_COPYAREA |
		               FBINFO_HWACCEL_FILLRECT;
		break;
	case FB_ACCEL_NEOMAGIC_NM2360:
		snprintf(info->fix.id, sizeof(info->fix.id),
				"MagicGraph 256ZX");
		info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
		               FBINFO_HWACCEL_COPYAREA |
		               FBINFO_HWACCEL_FILLRECT;
		break;
	case FB_ACCEL_NEOMAGIC_NM2380:
		snprintf(info->fix.id, sizeof(info->fix.id),
				"MagicGraph 256XL+");
		info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
		               FBINFO_HWACCEL_COPYAREA |
		               FBINFO_HWACCEL_FILLRECT;
		break;
	}

	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 0;
	info->fix.ypanstep = 4;
	info->fix.ywrapstep = 0;
	info->fix.accel = id->driver_data;

	info->fbops = &neofb_ops;
	info->pseudo_palette = par->palette;
	return info;
}

/* Release the colormap and the fb_info allocated by neo_alloc_fb_info(). */
static void neo_free_fb_info(struct fb_info *info)
{ if (info) { /* * Free the colourmap */ fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } } /* --------------------------------------------------------------------- */ static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct fb_info *info; u_int h_sync, v_sync; int video_len, err; DBG("neofb_probe"); err = pci_enable_device(dev); if (err) return err; err = -ENOMEM; info = neo_alloc_fb_info(dev, id); if (!info) return err; err = neo_map_mmio(info, dev); if (err) goto err_map_mmio; err = neo_scan_monitor(info); if (err) goto err_scan_monitor; video_len = neo_init_hw(info); if (video_len < 0) { err = video_len; goto err_init_hw; } err = neo_map_video(info, dev, video_len); if (err) goto err_init_hw; if (!fb_find_mode(&info->var, info, mode_option, NULL, 0, info->monspecs.modedb, 16)) { printk(KERN_ERR "neofb: Unable to find usable video mode.\n"); goto err_map_video; } /* * Calculate the hsync and vsync frequencies. Note that * we split the 1e12 constant up so that we can preserve * the precision and fit the results into 32-bit registers. 
* (1953125000 * 512 = 1e12) */ h_sync = 1953125000 / info->var.pixclock; h_sync = h_sync * 512 / (info->var.xres + info->var.left_margin + info->var.right_margin + info->var.hsync_len); v_sync = h_sync / (info->var.yres + info->var.upper_margin + info->var.lower_margin + info->var.vsync_len); printk(KERN_INFO "neofb v" NEOFB_VERSION ": %dkB VRAM, using %dx%d, %d.%03dkHz, %dHz\n", info->fix.smem_len >> 10, info->var.xres, info->var.yres, h_sync / 1000, h_sync % 1000, v_sync); if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) goto err_map_video; err = register_framebuffer(info); if (err < 0) goto err_reg_fb; printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); /* * Our driver data */ pci_set_drvdata(dev, info); return 0; err_reg_fb: fb_dealloc_cmap(&info->cmap); err_map_video: neo_unmap_video(info); err_init_hw: fb_destroy_modedb(info->monspecs.modedb); err_scan_monitor: neo_unmap_mmio(info); err_map_mmio: neo_free_fb_info(info); return err; } static void neofb_remove(struct pci_dev *dev) { struct fb_info *info = pci_get_drvdata(dev); DBG("neofb_remove"); if (info) { /* * If unregister_framebuffer fails, then * we will be leaving hooks that could cause * oopsen laying around. */ if (unregister_framebuffer(info)) printk(KERN_WARNING "neofb: danger danger! Oopsen imminent!\n"); neo_unmap_video(info); fb_destroy_modedb(info->monspecs.modedb); neo_unmap_mmio(info); neo_free_fb_info(info); /* * Ensure that the driver data is no longer * valid. 
*/ pci_set_drvdata(dev, NULL); } } static struct pci_device_id neofb_devices[] = { {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2070, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2070}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2090, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2090}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2093, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2093}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2097, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2097}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2160, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2160}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2200}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2230}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2360}, {PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2380, PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2380}, {0, 0, 0, 0, 0, 0, 0} }; MODULE_DEVICE_TABLE(pci, neofb_devices); static struct pci_driver neofb_driver = { .name = "neofb", .id_table = neofb_devices, .probe = neofb_probe, .remove = neofb_remove, }; /* ************************* init in-kernel code ************************** */ #ifndef MODULE static int __init neofb_setup(char *options) { char *this_opt; DBG("neofb_setup"); if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if (!strncmp(this_opt, "internal", 8)) internal = 1; else if (!strncmp(this_opt, "external", 8)) external = 1; else if (!strncmp(this_opt, "nostretch", 9)) nostretch = 1; else if (!strncmp(this_opt, "nopciburst", 10)) nopciburst = 1; else if (!strncmp(this_opt, "libretto", 8)) libretto = 1; else mode_option = this_opt; } return 0; } #endif /* MODULE */ static int __init neofb_init(void) { #ifndef MODULE char *option = NULL; if (fb_get_options("neofb", &option)) return -ENODEV; neofb_setup(option); #endif return 
pci_register_driver(&neofb_driver); } module_init(neofb_init); #ifdef MODULE static void __exit neofb_exit(void) { pci_unregister_driver(&neofb_driver); } module_exit(neofb_exit); #endif /* MODULE */
gpl-2.0
lnfamous/Kernel_Htc_Pico_CyanogenMod9
arch/arm/mach-s3c2412/mach-smdk2413.c
2824
3852
/* linux/arch/arm/mach-s3c2412/mach-smdk2413.c
 *
 * Copyright (c) 2006 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *
 * Thanks to Dimity Andric (TomTom) and Steven Ryu (Samsung) for the
 * loans of SMDK2413 to work with.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/hardware.h>
#include <asm/hardware/iomd.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
//#include <asm/debug-ll.h>

#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
#include <mach/regs-lcd.h>

#include <mach/idle.h>
#include <plat/udc.h>
#include <plat/iic.h>

#include <mach/fb.h>

#include <plat/s3c2410.h>
#include <plat/s3c2412.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>

#include <plat/common-smdk.h>

/* No board-specific static I/O mappings beyond the SoC defaults. */
static struct map_desc smdk2413_iodesc[] __initdata = {
};

/*
 * Configuration for the three on-chip UARTs; port 2 drives the IR
 * (IrDA) transceiver, hence its different line-control value.
 */
static struct s3c2410_uartcfg smdk2413_uartcfgs[] __initdata = {
	[0] = {
		.hwport	     = 0,
		.flags	     = 0,
		.ucon	     = 0x3c5,
		.ulcon	     = 0x03,
		.ufcon	     = 0x51,
	},
	[1] = {
		.hwport	     = 1,
		.flags	     = 0,
		.ucon	     = 0x3c5,
		.ulcon	     = 0x03,
		.ufcon	     = 0x51,
	},
	/* IR port */
	[2] = {
		.hwport	     = 2,
		.flags	     = 0,
		.ucon	     = 0x3c5,
		.ulcon	     = 0x43,
		.ufcon	     = 0x51,
	}
};

/* USB device controller: D+ pull-up is controlled via GPF2. */
static struct s3c2410_udc_mach_info smdk2413_udc_cfg __initdata = {
	.pullup_pin = S3C2410_GPF(2),
};

/* On-chip peripherals to register for this board. */
static struct platform_device *smdk2413_devices[] __initdata = {
	&s3c_device_ohci,
	&s3c_device_wdt,
	&s3c_device_i2c0,
	&s3c_device_iis,
	&s3c_device_usbgadget,
};

/*
 * If the bootloader did not pass an ATAG list at the expected address,
 * fall back to a hard-coded single 64MB SDRAM bank at 0x30000000.
 */
static void __init smdk2413_fixup(struct machine_desc *desc,
				  struct tag *tags, char **cmdline,
				  struct meminfo *mi)
{
	if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
		mi->nr_banks=1;
		mi->bank[0].start = 0x30000000;
		mi->bank[0].size = SZ_64M;
	}
}

/* Map I/O, set the 12MHz crystal rate and register the UARTs. */
static void __init smdk2413_map_io(void)
{
	s3c24xx_init_io(smdk2413_iodesc, ARRAY_SIZE(smdk2413_iodesc));
	s3c24xx_init_clocks(12000000);
	s3c24xx_init_uarts(smdk2413_uartcfgs, ARRAY_SIZE(smdk2413_uartcfgs));
}

/* Late board init: USB configuration, platform data and device registration. */
static void __init smdk2413_machine_init(void)
{	/* Turn off suspend on both USB ports, and switch the
	 * selectable USB port to USB device mode. */

	s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST |
			      S3C2410_MISCCR_USBSUSPND0 |
			      S3C2410_MISCCR_USBSUSPND1, 0x0);

	s3c24xx_udc_set_platdata(&smdk2413_udc_cfg);
	s3c_i2c0_set_platdata(NULL);

	platform_add_devices(smdk2413_devices, ARRAY_SIZE(smdk2413_devices));
	smdk_machine_init();
}

/*
 * Three machine records share the same callbacks: the bare S3C2413 and
 * the SMDK2412/SMDK2413 evaluation boards are functionally identical
 * from the kernel's point of view.
 */
MACHINE_START(S3C2413, "S3C2413")
	/* Maintainer: Ben Dooks <ben-linux@fluff.org> */
	.boot_params	= S3C2410_SDRAM_PA + 0x100,

	.fixup		= smdk2413_fixup,
	.init_irq	= s3c24xx_init_irq,
	.map_io		= smdk2413_map_io,
	.init_machine	= smdk2413_machine_init,
	.timer		= &s3c24xx_timer,
MACHINE_END

MACHINE_START(SMDK2412, "SMDK2412")
	/* Maintainer: Ben Dooks <ben-linux@fluff.org> */
	.boot_params	= S3C2410_SDRAM_PA + 0x100,

	.fixup		= smdk2413_fixup,
	.init_irq	= s3c24xx_init_irq,
	.map_io		= smdk2413_map_io,
	.init_machine	= smdk2413_machine_init,
	.timer		= &s3c24xx_timer,
MACHINE_END

MACHINE_START(SMDK2413, "SMDK2413")
	/* Maintainer: Ben Dooks <ben-linux@fluff.org> */
	.boot_params	= S3C2410_SDRAM_PA + 0x100,

	.fixup		= smdk2413_fixup,
	.init_irq	= s3c24xx_init_irq,
	.map_io		= smdk2413_map_io,
	.init_machine	= smdk2413_machine_init,
	.timer		= &s3c24xx_timer,
MACHINE_END
gpl-2.0
Toygoon/ToyKernel-Team.TCP_SGS2
drivers/pcmcia/sa1100_assabet.c
3336
3350
/* * drivers/pcmcia/sa1100_assabet.c * * PCMCIA implementation routines for Assabet * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/init.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/signal.h> #include <mach/assabet.h> #include "sa1100_generic.h" static struct pcmcia_irqs irqs[] = { { 1, ASSABET_IRQ_GPIO_CF_CD, "CF CD" }, { 1, ASSABET_IRQ_GPIO_CF_BVD2, "CF BVD2" }, { 1, ASSABET_IRQ_GPIO_CF_BVD1, "CF BVD1" }, }; static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { skt->socket.pci_irq = ASSABET_IRQ_GPIO_CF_IRQ; return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); } /* * Release all resources. */ static void assabet_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) { soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); } static void assabet_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { unsigned long levels = GPLR; state->detect = (levels & ASSABET_GPIO_CF_CD) ? 0 : 1; state->ready = (levels & ASSABET_GPIO_CF_IRQ) ? 1 : 0; state->bvd1 = (levels & ASSABET_GPIO_CF_BVD1) ? 1 : 0; state->bvd2 = (levels & ASSABET_GPIO_CF_BVD2) ? 1 : 0; state->wrprot = 0; /* Not available on Assabet. */ state->vs_3v = 1; /* Can only apply 3.3V on Assabet. */ state->vs_Xv = 0; } static int assabet_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { unsigned int mask; switch (state->Vcc) { case 0: mask = 0; break; case 50: printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V...\n", __func__); case 33: /* Can only apply 3.3V to the CF slot. */ mask = ASSABET_BCR_CF_PWR; break; default: printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__, state->Vcc); return -1; } /* Silently ignore Vpp, output enable, speaker enable. 
*/ if (state->flags & SS_RESET) mask |= ASSABET_BCR_CF_RST; ASSABET_BCR_frob(ASSABET_BCR_CF_RST | ASSABET_BCR_CF_PWR, mask); return 0; } /* * Enable card status IRQs on (re-)initialisation. This can * be called at initialisation, power management event, or * pcmcia event. */ static void assabet_pcmcia_socket_init(struct soc_pcmcia_socket *skt) { /* * Enable CF bus */ ASSABET_BCR_clear(ASSABET_BCR_CF_BUS_OFF); soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs)); } /* * Disable card status IRQs on suspend. */ static void assabet_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) { soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs)); /* * Tristate the CF bus signals. Also assert CF * reset as per user guide page 4-11. */ ASSABET_BCR_set(ASSABET_BCR_CF_BUS_OFF | ASSABET_BCR_CF_RST); } static struct pcmcia_low_level assabet_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = assabet_pcmcia_hw_init, .hw_shutdown = assabet_pcmcia_hw_shutdown, .socket_state = assabet_pcmcia_socket_state, .configure_socket = assabet_pcmcia_configure_socket, .socket_init = assabet_pcmcia_socket_init, .socket_suspend = assabet_pcmcia_socket_suspend, }; int __devinit pcmcia_assabet_init(struct device *dev) { int ret = -ENODEV; if (machine_is_assabet() && !machine_has_neponset()) ret = sa11xx_drv_pcmcia_probe(dev, &assabet_pcmcia_ops, 1, 1); return ret; }
gpl-2.0
qubir/PhoenixA20_linux_sourcecode
drivers/pcmcia/sa1100_assabet.c
3336
3350
/* * drivers/pcmcia/sa1100_assabet.c * * PCMCIA implementation routines for Assabet * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/init.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/signal.h> #include <mach/assabet.h> #include "sa1100_generic.h" static struct pcmcia_irqs irqs[] = { { 1, ASSABET_IRQ_GPIO_CF_CD, "CF CD" }, { 1, ASSABET_IRQ_GPIO_CF_BVD2, "CF BVD2" }, { 1, ASSABET_IRQ_GPIO_CF_BVD1, "CF BVD1" }, }; static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { skt->socket.pci_irq = ASSABET_IRQ_GPIO_CF_IRQ; return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); } /* * Release all resources. */ static void assabet_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) { soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); } static void assabet_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) { unsigned long levels = GPLR; state->detect = (levels & ASSABET_GPIO_CF_CD) ? 0 : 1; state->ready = (levels & ASSABET_GPIO_CF_IRQ) ? 1 : 0; state->bvd1 = (levels & ASSABET_GPIO_CF_BVD1) ? 1 : 0; state->bvd2 = (levels & ASSABET_GPIO_CF_BVD2) ? 1 : 0; state->wrprot = 0; /* Not available on Assabet. */ state->vs_3v = 1; /* Can only apply 3.3V on Assabet. */ state->vs_Xv = 0; } static int assabet_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) { unsigned int mask; switch (state->Vcc) { case 0: mask = 0; break; case 50: printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V...\n", __func__); case 33: /* Can only apply 3.3V to the CF slot. */ mask = ASSABET_BCR_CF_PWR; break; default: printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__, state->Vcc); return -1; } /* Silently ignore Vpp, output enable, speaker enable. 
*/ if (state->flags & SS_RESET) mask |= ASSABET_BCR_CF_RST; ASSABET_BCR_frob(ASSABET_BCR_CF_RST | ASSABET_BCR_CF_PWR, mask); return 0; } /* * Enable card status IRQs on (re-)initialisation. This can * be called at initialisation, power management event, or * pcmcia event. */ static void assabet_pcmcia_socket_init(struct soc_pcmcia_socket *skt) { /* * Enable CF bus */ ASSABET_BCR_clear(ASSABET_BCR_CF_BUS_OFF); soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs)); } /* * Disable card status IRQs on suspend. */ static void assabet_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) { soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs)); /* * Tristate the CF bus signals. Also assert CF * reset as per user guide page 4-11. */ ASSABET_BCR_set(ASSABET_BCR_CF_BUS_OFF | ASSABET_BCR_CF_RST); } static struct pcmcia_low_level assabet_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = assabet_pcmcia_hw_init, .hw_shutdown = assabet_pcmcia_hw_shutdown, .socket_state = assabet_pcmcia_socket_state, .configure_socket = assabet_pcmcia_configure_socket, .socket_init = assabet_pcmcia_socket_init, .socket_suspend = assabet_pcmcia_socket_suspend, }; int __devinit pcmcia_assabet_init(struct device *dev) { int ret = -ENODEV; if (machine_is_assabet() && !machine_has_neponset()) ret = sa11xx_drv_pcmcia_probe(dev, &assabet_pcmcia_ops, 1, 1); return ret; }
gpl-2.0
friedrich420/Note-4-TMO-AEL-Kernel-Lollipop-Source
drivers/net/wimax/i2400m/usb-rx.c
5640
15058
/*
 * Intel Wireless WiMAX Connection 2400m
 * USB RX handling
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *
 *
 * This handles the RX path on USB.
 *
 * When a notification is received that says 'there is RX data ready',
 * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
 * reads a buffer from USB and passes it to i2400m_rx() in the generic
 * handling code. The RX buffer has an specific format that is
 * described in rx.c.
 *
 * We use a kernel thread in a loop because:
 *
 *   - we want to be able to call the USB power management get/put
 *     functions (blocking) before each transaction.
 *
 *   - We might get a lot of notifications and we don't want to submit
 *     a zillion reads; by serializing, we are throttling.
 *
 *   - RX data processing can get heavy enough so that it is not
 *     appropriate for doing it in the USB callback; thus we run it in a
 *     process context.
 *
 * We provide a read buffer of an arbitrary size (short of a page); if
 * the callback reports -EOVERFLOW, it means it was too small, so we
 * just double the size and retry (being careful to append, as
 * sometimes the device provided some data). Every now and then we
 * check if the average packet size is smaller than the current packet
 * size and if so, we halve it. At the end, the size of the
 * preallocated buffer should be following the average received
 * transaction size, adapting dynamically to it.
 *
 * ROADMAP
 *
 *   i2400mu_rx_kick()		   Called from notif.c when we get a
 *				   'data ready' notification
 *   i2400mu_rxd()                 Kernel RX daemon
 *     i2400mu_rx()                Receive USB data
 *     i2400m_rx()                 Send data to generic i2400m RX handling
 *
 *   i2400mu_rx_setup()            called from i2400mu_bus_dev_start()
 *
 *   i2400mu_rx_release()          called from i2400mu_bus_dev_stop()
 */
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "i2400m-usb.h"


#define D_SUBMODULE rx
#include "usb-debug-levels.h"

/*
 * Dynamic RX size
 *
 * We can't let the rx_size be a multiple of 512 bytes (the RX
 * endpoint's max packet size). On some USB host controllers (we
 * haven't been able to fully characterize which), if the device is
 * about to send (for example) X bytes and we only post a buffer to
 * receive n*512, it will fail to mark that as babble (so that
 * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
 * rest).
 *
 * So on growing or shrinking, if it is a multiple of the
 * maxpacketsize, we remove some (instead of incresing some, so in a
 * buddy allocator we try to waste less space).
 *
 * Note we also need a hook for this on i2400mu_rx() -- when we do the
 * first read, we are sure we won't hit this spot because
 * i240mm->rx_size has been set properly. However, if we have to
 * double because of -EOVERFLOW, when we launch the read to get the
 * rest of the data, we *have* to make sure that also is not a
 * multiple of the max_pkt_size.
 */

/*
 * Double the RX buffer size, backing off 8 bytes whenever the doubled
 * value would be a multiple of the 512-byte max packet size (see the
 * "Dynamic RX size" note above).  Returns the new candidate size; the
 * caller decides whether to commit it to i2400mu->rx_size.
 */
static
size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
{
	struct device *dev = &i2400mu->usb_iface->dev;
	size_t rx_size;
	const size_t max_pkt_size = 512;

	rx_size = 2 * i2400mu->rx_size;
	if (rx_size % max_pkt_size == 0) {
		rx_size -= 8;
		d_printf(1, dev,
			 "RX: expected size grew to %zu [adjusted -8] "
			 "from %zu\n",
			 rx_size, i2400mu->rx_size);
	} else
		d_printf(1, dev,
			 "RX: expected size grew to %zu from %zu\n",
			 rx_size, i2400mu->rx_size);
	return rx_size;
}

/*
 * If the last 100+ messages averaged less than half the current buffer
 * size, halve the buffer (again avoiding multiples of 512) and reset
 * the running statistics.  Only active when rx_size_auto_shrink is set.
 */
static
void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
{
	const size_t max_pkt_size = 512;
	struct device *dev = &i2400mu->usb_iface->dev;

	if (unlikely(i2400mu->rx_size_cnt >= 100
		     && i2400mu->rx_size_auto_shrink)) {
		size_t avg_rx_size =
			i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
		size_t new_rx_size = i2400mu->rx_size / 2;
		if (avg_rx_size < new_rx_size) {
			if (new_rx_size % max_pkt_size == 0) {
				new_rx_size -= 8;
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "[adjusted -8] from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			} else
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			i2400mu->rx_size = new_rx_size;
			i2400mu->rx_size_cnt = 0;
			i2400mu->rx_size_acc = i2400mu->rx_size;
		}
	}
}

/*
 * Receive a message with payloads from the USB bus into an skb
 *
 * @i2400mu: USB device descriptor
 * @rx_skb: skb where to place the received message
 *
 * Deals with all the USB-specifics of receiving, dynamically
 * increasing the buffer size if so needed. Returns the payload in the
 * skb, ready to process. On a zero-length packet, we retry.
 *
 * On soft USB errors, we retry (until they become too frequent and
 * then are promoted to hard); on hard USB errors, we reset the
 * device. On other errors (skb realloacation, we just drop it and
 * hope for the next invocation to solve it).
 *
 * Returns: pointer to the skb if ok, ERR_PTR on error.
 *   NOTE: this function might realloc the skb (if it is too small),
 *   so always update with the one returned.
 *   ERR_PTR() is < 0 on error.
 *   Will return NULL if it cannot reallocate -- this can be
 *   considered a transient retryable error.
 */
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	/* Read into the unused tail of the skb, never a 512 multiple. */
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* ZLP, just resubmit */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking with our
		 * requests. Clear it and give it some time. If they
		 * happen to often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device some time */
		goto retry;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* too small, reallocate */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16))	/* cap it */
			i2400mu->rx_size = rx_size;
		else if (printk_ratelimit()) {
			dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
			result = -EINVAL;
			goto out;
		}
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			if (printk_ratelimit())
				dev_err(dev, "RX: Can't reallocate skb to %d; "
					"RX dropped\n", rx_size);
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it...*/
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) skb_end_offset(new_skb));
		goto retry;
	}
		/* In most cases, it happens due to the hardware scheduling a
		 * read when there was no data - unfortunately, we have no way
		 * to tell this timeout from a USB timeout. So we just ignore
		 * it. */
	case -ETIMEDOUT:
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* Any error */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n",
			result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}

/*
 * Kernel thread for USB reception of data
 *
 * This thread waits for a kick; once kicked, it will allocate an skb
 * and receive a single message to it from USB (using
 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 * code for processing.
 *
 * When done processing, it runs some dirty statistics to verify if
 * the last 100 messages received were smaller than half of the
 * current RX buffer size. In that case, the RX buffer size is
 * halved. This will helps lowering the pressure on the memory
 * allocator.
 *
 * Hard errors force the thread to exit.
 */
static
int i2400mu_rxd(void *_i2400mu)
{
	int result = 0;
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	size_t pending;
	int rx_size;
	struct sk_buff *rx_skb;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	BUG_ON(i2400mu->rx_kthread != NULL);
	i2400mu->rx_kthread = current;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	while (1) {
		d_printf(2, dev, "RX: waiting for messages\n");
		pending = 0;
		wait_event_interruptible(
			i2400mu->rx_wq,
			(kthread_should_stop()	/* check this first! */
			 || (pending = atomic_read(
				     &i2400mu->rx_pending_count)))
			);
		if (kthread_should_stop())
			break;
		if (pending == 0)
			continue;
		rx_size = i2400mu->rx_size;
		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
		if (rx_skb == NULL) {
			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
				rx_size);
			msleep(50);	/* give it some time? */
			continue;
		}

		/* Receive the message with the payloads */
		rx_skb = i2400mu_rx(i2400mu, rx_skb);
		/*
		 * PTR_ERR() is taken unconditionally but only consulted
		 * when IS_ERR() is true, so this ordering is safe.
		 */
		result = PTR_ERR(rx_skb);
		if (IS_ERR(rx_skb))
			goto out;
		atomic_dec(&i2400mu->rx_pending_count);
		if (rx_skb == NULL || rx_skb->len == 0) {
			/* some "ignorable" condition */
			kfree_skb(rx_skb);
			continue;
		}

		/* Deliver the message to the generic i2400m code */
		i2400mu->rx_size_cnt++;
		i2400mu->rx_size_acc += rx_skb->len;
		result = i2400m_rx(i2400m, rx_skb);
		if (result == -EIO
		    && edc_inc(&i2400mu->urb_edc,
			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			goto error_reset;
		}

		/* Maybe adjust RX buffer size */
		i2400mu_rx_size_maybe_shrink(i2400mu);
	}
	result = 0;
out:
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;

error_reset:
	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
		"resetting device\n");
	usb_queue_reset_device(i2400mu->usb_iface);
	goto out;
}

/*
 * Start reading from the device
 *
 * @i2400m: device instance
 *
 * Notify the RX thread that there is data pending.
 */
void i2400mu_rx_kick(struct i2400mu *i2400mu)
{
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;

	/* NOTE(review): the trace prints the i2400m pointer, not i2400mu. */
	d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
	atomic_inc(&i2400mu->rx_pending_count);
	wake_up_all(&i2400mu->rx_wq);
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}

/* Spawn the RX kernel thread; returns 0 or a negative errno. */
int i2400mu_rx_setup(struct i2400mu *i2400mu)
{
	int result = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	struct task_struct *kthread;

	kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
			      wimax_dev->name);
	/* the kthread function sets i2400mu->rx_thread */
	if (IS_ERR(kthread)) {
		result = PTR_ERR(kthread);
		dev_err(dev, "RX: cannot start thread: %d\n", result);
	}
	return result;
}

/* Stop the RX kernel thread if it is still running. */
void i2400mu_rx_release(struct i2400mu *i2400mu)
{
	unsigned long flags;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = i2400m_dev(i2400m);
	struct task_struct *kthread;

	/* Take the thread pointer under the lock so we stop it only once. */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	kthread = i2400mu->rx_kthread;
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	if (kthread)
		kthread_stop(kthread);
	else
		d_printf(1, dev, "RX: kthread had already exited\n");
}
gpl-2.0
aatjitra/silean
arch/arm/plat-samsung/clock-clksrc.c
7688
4963
/* linux/arch/arm/plat-samsung/clock-clksrc.c
 *
 * Copyright 2008 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>

#include <plat/clock.h>
#include <plat/clock-clksrc.h>
#include <plat/cpu-freq.h>

/* Map a generic struct clk back to its enclosing clksrc_clk. */
static inline struct clksrc_clk *to_clksrc(struct clk *clk)
{
	return container_of(clk, struct clksrc_clk, clk);
}

/* Build a mask of @nr_bits set bits, positioned at @shift. */
static inline u32 bit_mask(u32 shift, u32 nr_bits)
{
	u32 mask = 0xffffffff >> (32 - nr_bits);

	return mask << shift;
}

/* Rate = parent rate / (divider field + 1). */
static unsigned long s3c_getrate_clksrc(struct clk *clk)
{
	struct clksrc_clk *sclk = to_clksrc(clk);
	unsigned long rate = clk_get_rate(clk->parent);
	u32 clkdiv = __raw_readl(sclk->reg_div.reg);
	u32 mask = bit_mask(sclk->reg_div.shift, sclk->reg_div.size);

	clkdiv &= mask;
	clkdiv >>= sclk->reg_div.shift;
	clkdiv++;

	rate /= clkdiv;
	return rate;
}

/*
 * Round @rate to what the divider can produce, then program the
 * divider field.  Returns 0 or -EINVAL if the divider is out of range.
 */
static int s3c_setrate_clksrc(struct clk *clk, unsigned long rate)
{
	struct clksrc_clk *sclk = to_clksrc(clk);
	void __iomem *reg = sclk->reg_div.reg;
	unsigned int div;
	u32 mask = bit_mask(sclk->reg_div.shift, sclk->reg_div.size);
	u32 val;

	rate = clk_round_rate(clk, rate);
	div = clk_get_rate(clk->parent) / rate;
	if (div > (1 << sclk->reg_div.size))
		return -EINVAL;

	val = __raw_readl(reg);
	val &= ~mask;
	val |= (div - 1) << sclk->reg_div.shift;
	__raw_writel(val, reg);

	return 0;
}

/*
 * Select @parent as the clock source: find its index in the sources
 * table and program the mux field.  Returns 0 or -EINVAL if @parent is
 * not one of this clock's sources.
 */
static int s3c_setparent_clksrc(struct clk *clk, struct clk *parent)
{
	struct clksrc_clk *sclk = to_clksrc(clk);
	struct clksrc_sources *srcs = sclk->sources;
	u32 clksrc = __raw_readl(sclk->reg_src.reg);
	u32 mask = bit_mask(sclk->reg_src.shift, sclk->reg_src.size);
	int src_nr = -1;
	int ptr;

	for (ptr = 0; ptr < srcs->nr_sources; ptr++)
		if (srcs->sources[ptr] == parent) {
			src_nr = ptr;
			break;
		}

	if (src_nr >= 0) {
		clk->parent = parent;

		clksrc &= ~mask;
		clksrc |= src_nr << sclk->reg_src.shift;

		__raw_writel(clksrc, sclk->reg_src.reg);
		return 0;
	}

	return -EINVAL;
}

/* Closest achievable rate: parent rate divided by a valid divider. */
static unsigned long s3c_roundrate_clksrc(struct clk *clk,
					  unsigned long rate)
{
	struct clksrc_clk *sclk = to_clksrc(clk);
	unsigned long parent_rate = clk_get_rate(clk->parent);
	int max_div = 1 << sclk->reg_div.size;
	int div;

	/*
	 * Robustness fix: a zero rate request previously divided by
	 * zero below; treat it like "as slow as clamped", i.e. clamp
	 * to the parent rate path.
	 */
	if (rate == 0 || rate >= parent_rate)
		rate = parent_rate;
	else {
		div = parent_rate / rate;
		if (parent_rate % rate)
			div++;

		if (div == 0)
			div = 1;
		if (div > max_div)
			div = max_div;

		rate = parent_rate / div;
	}

	return rate;
}

/* Clock initialisation code */

/*
 * Read the mux register and set clk->clk.parent accordingly, optionally
 * announcing the result.  Used at init and after cpufreq transitions.
 */
void __init_or_cpufreq s3c_set_clksrc(struct clksrc_clk *clk, bool announce)
{
	struct clksrc_sources *srcs = clk->sources;
	u32 mask = bit_mask(clk->reg_src.shift, clk->reg_src.size);
	u32 clksrc;

	if (!clk->reg_src.reg) {
		if (!clk->clk.parent)
			printk(KERN_ERR "%s: no parent clock specified\n",
				clk->clk.name);
		return;
	}

	clksrc = __raw_readl(clk->reg_src.reg);
	clksrc &= mask;
	clksrc >>= clk->reg_src.shift;

	/*
	 * Bug fix: the bounds check used '>', which let
	 * clksrc == nr_sources index one entry past the end of the
	 * sources[] array.
	 */
	if (clksrc >= srcs->nr_sources || !srcs->sources[clksrc]) {
		printk(KERN_ERR "%s: bad source %d\n",
		       clk->clk.name, clksrc);
		return;
	}

	clk->clk.parent = srcs->sources[clksrc];

	if (announce)
		printk(KERN_INFO "%s: source is %s (%d), rate is %ld\n",
		       clk->clk.name, clk->clk.parent->name, clksrc,
		       clk_get_rate(&clk->clk));
}

static struct clk_ops clksrc_ops = {
	.set_parent	= s3c_setparent_clksrc,
	.get_rate	= s3c_getrate_clksrc,
	.set_rate	= s3c_setrate_clksrc,
	.round_rate	= s3c_roundrate_clksrc,
};

static struct clk_ops clksrc_ops_nodiv = {
	.set_parent	= s3c_setparent_clksrc,
};

static struct clk_ops clksrc_ops_nosrc = {
	.get_rate	= s3c_getrate_clksrc,
	.set_rate	= s3c_setrate_clksrc,
	.round_rate	= s3c_roundrate_clksrc,
};

/*
 * Register @size clocks from the @clksrc array, filling in default ops
 * for mux-only and divider-only clocks and resolving each clock's
 * current parent from the hardware before registration.
 */
void __init s3c_register_clksrc(struct clksrc_clk *clksrc, int size)
{
	int ret;

	for (; size > 0; size--, clksrc++) {
		if (!clksrc->reg_div.reg && !clksrc->reg_src.reg)
			printk(KERN_ERR "%s: clock %s has no registers set\n",
			       __func__, clksrc->clk.name);

		/* fill in the default functions */

		if (!clksrc->clk.ops) {
			if (!clksrc->reg_div.reg)
				clksrc->clk.ops = &clksrc_ops_nodiv;
			else if (!clksrc->reg_src.reg)
				clksrc->clk.ops = &clksrc_ops_nosrc;
			else
				clksrc->clk.ops = &clksrc_ops;
		}

		/* setup the clocksource, but do not announce it
		 * as it may be re-set by the setup routines
		 * called after the rest of the clocks have been
		 * registered */
		s3c_set_clksrc(clksrc, false);

		ret = s3c24xx_register_clock(&clksrc->clk);

		if (ret < 0) {
			printk(KERN_ERR "%s: failed to register %s (%d)\n",
			       __func__, clksrc->clk.name, ret);
		}
	}
}
gpl-2.0
fazerg/android_kernel_ZTE_X9180
drivers/staging/sbe-2t3e3/exar7250.c
8200
7282
/*
 * SBE 2T3E3 synchronous serial card driver for Linux
 *
 * Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This code is based on a driver written by SBE Inc.
 */

#include "2t3e3.h"
#include "ctrl.h"

/*
 * Bring the EXAR XRT7250 framer to the driver's default configuration:
 * T3 C-bit operating mode with interrupt-enable reset and asynchronous
 * TXINCLK timing, then AMI line code, inverted RX line clock, and TX/RX
 * loss-of-clock detection disabled, and finally the T3 C-bit frame type
 * (which also programs the interrupt enables for that mode).
 */
void exar7250_init(struct channel *sc)
{
	exar7250_write(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE,
		       SBE_2T3E3_FRAMER_VAL_T3_CBIT |
		       SBE_2T3E3_FRAMER_VAL_INTERRUPT_ENABLE_RESET |
		       SBE_2T3E3_FRAMER_VAL_TIMING_ASYNCH_TXINCLK);

	exar7250_write(sc, SBE_2T3E3_FRAMER_REG_IO_CONTROL,
		       SBE_2T3E3_FRAMER_VAL_DISABLE_TX_LOSS_OF_CLOCK |
		       SBE_2T3E3_FRAMER_VAL_DISABLE_RX_LOSS_OF_CLOCK |
		       SBE_2T3E3_FRAMER_VAL_AMI_LINE_CODE |
		       SBE_2T3E3_FRAMER_VAL_RX_LINE_CLOCK_INVERT);

	exar7250_set_frame_type(sc, SBE_2T3E3_FRAME_TYPE_T3_CBIT);
}

/*
 * Switch the framer to one of the four supported frame types
 * (E3 G.751, E3 G.832, T3 C-bit, T3 M13).  Unsupported values are
 * rejected up front by the first switch, so the second switch's
 * default case is unreachable.  Interrupts are stopped around the
 * mode change and re-enabled for the new type afterwards.
 */
void exar7250_set_frame_type(struct channel *sc, u32 type)
{
	u32 val;

	/* validate @type before touching any hardware state */
	switch (type) {
	case SBE_2T3E3_FRAME_TYPE_E3_G751:
	case SBE_2T3E3_FRAME_TYPE_E3_G832:
	case SBE_2T3E3_FRAME_TYPE_T3_CBIT:
	case SBE_2T3E3_FRAME_TYPE_T3_M13:
		break;
	default:
		return;
	}

	exar7250_stop_intr(sc, type);

	/* clear loopback, T3/E3 select and frame-format fields, keep the rest */
	val = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE);
	val &= ~(SBE_2T3E3_FRAMER_VAL_LOCAL_LOOPBACK_MODE |
		 SBE_2T3E3_FRAMER_VAL_T3_E3_SELECT |
		 SBE_2T3E3_FRAMER_VAL_FRAME_FORMAT_SELECT);

	switch (type) {
	case SBE_2T3E3_FRAME_TYPE_E3_G751:
		val |= SBE_2T3E3_FRAMER_VAL_E3_G751;
		break;
	case SBE_2T3E3_FRAME_TYPE_E3_G832:
		val |= SBE_2T3E3_FRAMER_VAL_E3_G832;
		break;
	case SBE_2T3E3_FRAME_TYPE_T3_CBIT:
		val |= SBE_2T3E3_FRAMER_VAL_T3_CBIT;
		break;
	case SBE_2T3E3_FRAME_TYPE_T3_M13:
		val |= SBE_2T3E3_FRAMER_VAL_T3_M13;
		break;
	default:
		return;
	}

	exar7250_write(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE, val);
	exar7250_start_intr(sc, type);
}

/*
 * Enable the framer RX interrupts appropriate for @type and refresh
 * the driver's cached LOS/OOF state from the current status registers.
 *
 * NOTE(review): several status registers are read with the result
 * discarded before an enable is written — presumably these reads clear
 * latched interrupt status so stale events are not reported; confirm
 * against the XRT7250 datasheet.  The #if 0 blocks preserve the wider
 * interrupt masks from the original SBE driver.
 */
void exar7250_start_intr(struct channel *sc, u32 type)
{
	u32 val;

	switch (type) {
	case SBE_2T3E3_FRAME_TYPE_E3_G751:
	case SBE_2T3E3_FRAME_TYPE_E3_G832:
		/* snapshot current status; OOF is taken from this register,
		   LOS comes from the CPLD instead (see #else branch) */
		val = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_CONFIGURATION_STATUS_2);
#if 0
		sc->s.LOS = val & SBE_2T3E3_FRAMER_VAL_E3_RX_LOS ? 1 : 0;
#else
		cpld_LOS_update(sc);
#endif
		sc->s.OOF = val & SBE_2T3E3_FRAMER_VAL_E3_RX_OOF ? 1 : 0;

		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_1);
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_1,
			       SBE_2T3E3_FRAMER_VAL_E3_RX_OOF_INTERRUPT_ENABLE |
			       SBE_2T3E3_FRAMER_VAL_E3_RX_LOS_INTERRUPT_ENABLE);
#if 0
			       /*SBE_2T3E3_FRAMER_VAL_E3_RX_COFA_INTERRUPT_ENABLE |
				 SBE_2T3E3_FRAMER_VAL_E3_RX_OOF_INTERRUPT_ENABLE |
				 SBE_2T3E3_FRAMER_VAL_E3_RX_LOF_INTERRUPT_ENABLE |
				 SBE_2T3E3_FRAMER_VAL_E3_RX_LOS_INTERRUPT_ENABLE |
				 SBE_2T3E3_FRAMER_VAL_E3_RX_AIS_INTERRUPT_ENABLE);*/
#endif

		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_2);
#if 0
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_2,
			       SBE_2T3E3_FRAMER_VAL_E3_RX_FEBE_INTERRUPT_ENABLE |
			       SBE_2T3E3_FRAMER_VAL_E3_RX_FERF_INTERRUPT_ENABLE |
			       SBE_2T3E3_FRAMER_VAL_E3_RX_FRAMING_BYTE_ERROR_INTERRUPT_ENABLE);
#endif
		break;

	case SBE_2T3E3_FRAME_TYPE_T3_CBIT:
	case SBE_2T3E3_FRAME_TYPE_T3_M13:
		/* same pattern as the E3 case, using the T3 register set */
		val = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_CONFIGURATION_STATUS);
#if 0
		sc->s.LOS = val & SBE_2T3E3_FRAMER_VAL_T3_RX_LOS ? 1 : 0;
#else
		cpld_LOS_update(sc);
#endif
		sc->s.OOF = val & SBE_2T3E3_FRAMER_VAL_T3_RX_OOF ? 1 : 0;

		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_STATUS);
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_ENABLE,
			       SBE_2T3E3_FRAMER_VAL_T3_RX_LOS_INTERRUPT_ENABLE |
			       SBE_2T3E3_FRAMER_VAL_T3_RX_OOF_INTERRUPT_ENABLE);
#if 0
		/* SBE_2T3E3_FRAMER_VAL_T3_RX_CP_BIT_ERROR_INTERRUPT_ENABLE |
		   SBE_2T3E3_FRAMER_VAL_T3_RX_LOS_INTERRUPT_ENABLE |
		   SBE_2T3E3_FRAMER_VAL_T3_RX_AIS_INTERRUPT_ENABLE |
		   SBE_2T3E3_FRAMER_VAL_T3_RX_IDLE_INTERRUPT_ENABLE |
		   SBE_2T3E3_FRAMER_VAL_T3_RX_FERF_INTERRUPT_ENABLE |
		   SBE_2T3E3_FRAMER_VAL_T3_RX_AIC_INTERRUPT_ENABLE |
		   SBE_2T3E3_FRAMER_VAL_T3_RX_OOF_INTERRUPT_ENABLE |
		   SBE_2T3E3_FRAMER_VAL_T3_RX_P_BIT_INTERRUPT_ENABLE);*/
#endif

		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS);
#if 0
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS,
			       SBE_2T3E3_FRAMER_VAL_T3_RX_FEAC_REMOVE_INTERRUPT_ENABLE |
			       SBE_2T3E3_FRAMER_VAL_T3_RX_FEAC_VALID_INTERRUPT_ENABLE);
#endif
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_LAPD_CONTROL, 0);
		break;

	default:
		return;
	}

	/* finally unmask the block-level RX/TX interrupt sources */
	exar7250_read(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_STATUS);
	exar7250_write(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_ENABLE,
		       SBE_2T3E3_FRAMER_VAL_RX_INTERRUPT_ENABLE |
		       SBE_2T3E3_FRAMER_VAL_TX_INTERRUPT_ENABLE);
}

/*
 * Mask all framer interrupts for frame type @type.  Each enable register
 * is zeroed and its companion status register read back afterwards —
 * presumably a read-to-clear of any pending status; confirm against the
 * XRT7250 datasheet.  Block-level interrupts are masked first so nothing
 * fires while the per-mode enables are being cleared.
 */
void exar7250_stop_intr(struct channel *sc, u32 type)
{
	exar7250_write(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_ENABLE, 0);
	exar7250_read(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_STATUS);

	switch (type) {
	case SBE_2T3E3_FRAME_TYPE_E3_G751:
	case SBE_2T3E3_FRAME_TYPE_E3_G832:
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_1, 0);
		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_1);
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_2, 0);
		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_2);
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_LAPD_CONTROL, 0);
		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_LAPD_CONTROL);
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_TX_LAPD_STATUS, 0);
		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_TX_LAPD_STATUS);
		break;

	case SBE_2T3E3_FRAME_TYPE_T3_CBIT:
	case SBE_2T3E3_FRAME_TYPE_T3_M13:
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_ENABLE, 0);
		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_STATUS);
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS, 0);
		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS);
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_LAPD_CONTROL, 0);
		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_LAPD_CONTROL);
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_TX_FEAC_CONFIGURATION_STATUS, 0);
		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_TX_FEAC_CONFIGURATION_STATUS);
		exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_TX_LAPD_STATUS, 0);
		exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_TX_LAPD_STATUS);
		break;
	}
}

/*
 * Set or clear the unipolar-data bit in the framer's I/O control
 * register.  @mode is SBE_2T3E3_ON or SBE_2T3E3_OFF; anything else is
 * silently ignored.
 */
void exar7250_unipolar_onoff(struct channel *sc, u32 mode)
{
	switch (mode) {
	case SBE_2T3E3_OFF:
		exar7300_clear_bit(sc, SBE_2T3E3_FRAMER_REG_IO_CONTROL,
				   SBE_2T3E3_FRAMER_VAL_UNIPOLAR);
		break;
	case SBE_2T3E3_ON:
		exar7300_set_bit(sc, SBE_2T3E3_FRAMER_REG_IO_CONTROL,
				 SBE_2T3E3_FRAMER_VAL_UNIPOLAR);
		break;
	}
}

/*
 * Enable or disable the framer's local loopback mode.  @mode is one of
 * SBE_2T3E3_FRAMER_VAL_LOOPBACK_ON/OFF; anything else is silently ignored.
 */
void exar7250_set_loopback(struct channel *sc, u32 mode)
{
	switch (mode) {
	case SBE_2T3E3_FRAMER_VAL_LOOPBACK_OFF:
		exar7300_clear_bit(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE,
				   SBE_2T3E3_FRAMER_VAL_LOCAL_LOOPBACK_MODE);
		break;
	case SBE_2T3E3_FRAMER_VAL_LOOPBACK_ON:
		exar7300_set_bit(sc, SBE_2T3E3_FRAMER_REG_OPERATING_MODE,
				 SBE_2T3E3_FRAMER_VAL_LOCAL_LOOPBACK_MODE);
		break;
	}
}
gpl-2.0