repo_name
string
path
string
copies
string
size
string
content
string
license
string
ChaOSChriS/ChaOS-mako
net/core/stream.c
10396
5229
/* * SUCS NET3: * * Generic stream handling routines. These are generic for most * protocols. Even IP. Tonight 8-). * This is used because TCP, LLC (others too) layer all have mostly * identical sendmsg() and recvmsg() code. * So we (will) share it here. * * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br> * (from old tcp.c code) * Alan Cox <alan@lxorguk.ukuu.org.uk> (Borrowed comments 8-)) */ #include <linux/module.h> #include <linux/net.h> #include <linux/signal.h> #include <linux/tcp.h> #include <linux/wait.h> #include <net/sock.h> /** * sk_stream_write_space - stream socket write_space callback. * @sk: socket * * FIXME: write proper description */ void sk_stream_write_space(struct sock *sk) { struct socket *sock = sk->sk_socket; struct socket_wq *wq; if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { clear_bit(SOCK_NOSPACE, &sock->flags); rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_poll(&wq->wait, POLLOUT | POLLWRNORM | POLLWRBAND); if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } } EXPORT_SYMBOL(sk_stream_write_space); /** * sk_stream_wait_connect - Wait for a socket to get into the connected state * @sk: sock to wait on * @timeo_p: for how long to wait * * Must be called with the socket locked. 
*/ int sk_stream_wait_connect(struct sock *sk, long *timeo_p) { struct task_struct *tsk = current; DEFINE_WAIT(wait); int done; do { int err = sock_error(sk); if (err) return err; if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) return -EPIPE; if (!*timeo_p) return -EAGAIN; if (signal_pending(tsk)) return sock_intr_errno(*timeo_p); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sk->sk_write_pending++; done = sk_wait_event(sk, timeo_p, !sk->sk_err && !((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); finish_wait(sk_sleep(sk), &wait); sk->sk_write_pending--; } while (!done); return 0; } EXPORT_SYMBOL(sk_stream_wait_connect); /** * sk_stream_closing - Return 1 if we still have things to send in our buffers. * @sk: socket to verify */ static inline int sk_stream_closing(struct sock *sk) { return (1 << sk->sk_state) & (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK); } void sk_stream_wait_close(struct sock *sk, long timeout) { if (timeout) { DEFINE_WAIT(wait); do { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) break; } while (!signal_pending(current) && timeout); finish_wait(sk_sleep(sk), &wait); } } EXPORT_SYMBOL(sk_stream_wait_close); /** * sk_stream_wait_memory - Wait for more memory for a socket * @sk: socket to wait for memory * @timeo_p: for how long */ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) { int err = 0; long vm_wait = 0; long current_timeo = *timeo_p; DEFINE_WAIT(wait); if (sk_stream_memory_free(sk)) current_timeo = vm_wait = (net_random() % (HZ / 5)) + 2; while (1) { set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto do_error; if (!*timeo_p) goto do_nonblock; if (signal_pending(current)) goto do_interrupted; clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); if (sk_stream_memory_free(sk) && !vm_wait) break; 
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; sk_wait_event(sk, &current_timeo, sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN) || (sk_stream_memory_free(sk) && !vm_wait)); sk->sk_write_pending--; if (vm_wait) { vm_wait -= current_timeo; current_timeo = *timeo_p; if (current_timeo != MAX_SCHEDULE_TIMEOUT && (current_timeo -= vm_wait) < 0) current_timeo = 0; vm_wait = 0; } *timeo_p = current_timeo; } out: finish_wait(sk_sleep(sk), &wait); return err; do_error: err = -EPIPE; goto out; do_nonblock: err = -EAGAIN; goto out; do_interrupted: err = sock_intr_errno(*timeo_p); goto out; } EXPORT_SYMBOL(sk_stream_wait_memory); int sk_stream_error(struct sock *sk, int flags, int err) { if (err == -EPIPE) err = sock_error(sk) ? : -EPIPE; if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) send_sig(SIGPIPE, current, 0); return err; } EXPORT_SYMBOL(sk_stream_error); void sk_stream_kill_queues(struct sock *sk) { /* First the read buffer. */ __skb_queue_purge(&sk->sk_receive_queue); /* Next, the error queue. */ __skb_queue_purge(&sk->sk_error_queue); /* Next, the write queue. */ WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); /* Account for returned memory. */ sk_mem_reclaim(sk); WARN_ON(sk->sk_wmem_queued); WARN_ON(sk->sk_forward_alloc); /* It is _impossible_ for the backlog to contain anything * when we get here. All user references to this socket * have gone away, only the net layer knows can touch it. */ } EXPORT_SYMBOL(sk_stream_kill_queues);
gpl-2.0
AOKP/kernel_samsung_exynos5410
kernel/irq/autoprobe.c
10908
4595
/* * linux/kernel/irq/autoprobe.c * * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar * * This file contains the interrupt probing code and driver APIs. */ #include <linux/irq.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/async.h> #include "internals.h" /* * Autodetection depends on the fact that any interrupt that * comes in on to an unassigned handler will get stuck with * "IRQS_WAITING" cleared and the interrupt disabled. */ static DEFINE_MUTEX(probing_active); /** * probe_irq_on - begin an interrupt autodetect * * Commence probing for an interrupt. The interrupts are scanned * and a mask of potential interrupt lines is returned. * */ unsigned long probe_irq_on(void) { struct irq_desc *desc; unsigned long mask = 0; int i; /* * quiesce the kernel, or at least the asynchronous portion */ async_synchronize_full(); mutex_lock(&probing_active); /* * something may have generated an irq long ago and we want to * flush such a longstanding irq before considering it as spurious. */ for_each_irq_desc_reverse(i, desc) { raw_spin_lock_irq(&desc->lock); if (!desc->action && irq_settings_can_probe(desc)) { /* * Some chips need to know about probing in * progress: */ if (desc->irq_data.chip->irq_set_type) desc->irq_data.chip->irq_set_type(&desc->irq_data, IRQ_TYPE_PROBE); irq_startup(desc, false); } raw_spin_unlock_irq(&desc->lock); } /* Wait for longstanding interrupts to trigger. 
*/ msleep(20); /* * enable any unassigned irqs * (we must startup again here because if a longstanding irq * happened in the previous stage, it may have masked itself) */ for_each_irq_desc_reverse(i, desc) { raw_spin_lock_irq(&desc->lock); if (!desc->action && irq_settings_can_probe(desc)) { desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; if (irq_startup(desc, false)) desc->istate |= IRQS_PENDING; } raw_spin_unlock_irq(&desc->lock); } /* * Wait for spurious interrupts to trigger */ msleep(100); /* * Now filter out any obviously spurious interrupts */ for_each_irq_desc(i, desc) { raw_spin_lock_irq(&desc->lock); if (desc->istate & IRQS_AUTODETECT) { /* It triggered already - consider it spurious. */ if (!(desc->istate & IRQS_WAITING)) { desc->istate &= ~IRQS_AUTODETECT; irq_shutdown(desc); } else if (i < 32) mask |= 1 << i; } raw_spin_unlock_irq(&desc->lock); } return mask; } EXPORT_SYMBOL(probe_irq_on); /** * probe_irq_mask - scan a bitmap of interrupt lines * @val: mask of interrupts to consider * * Scan the interrupt lines and return a bitmap of active * autodetect interrupts. The interrupt probe logic state * is then returned to its previous value. * * Note: we need to scan all the irq's even though we will * only return autodetect irq numbers - just so that we reset * them all to a known state. */ unsigned int probe_irq_mask(unsigned long val) { unsigned int mask = 0; struct irq_desc *desc; int i; for_each_irq_desc(i, desc) { raw_spin_lock_irq(&desc->lock); if (desc->istate & IRQS_AUTODETECT) { if (i < 16 && !(desc->istate & IRQS_WAITING)) mask |= 1 << i; desc->istate &= ~IRQS_AUTODETECT; irq_shutdown(desc); } raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); return mask & val; } EXPORT_SYMBOL(probe_irq_mask); /** * probe_irq_off - end an interrupt autodetect * @val: mask of potential interrupts (unused) * * Scans the unused interrupt lines and returns the line which * appears to have triggered the interrupt. 
If no interrupt was * found then zero is returned. If more than one interrupt is * found then minus the first candidate is returned to indicate * their is doubt. * * The interrupt probe logic state is returned to its previous * value. * * BUGS: When used in a module (which arguably shouldn't happen) * nothing prevents two IRQ probe callers from overlapping. The * results of this are non-optimal. */ int probe_irq_off(unsigned long val) { int i, irq_found = 0, nr_of_irqs = 0; struct irq_desc *desc; for_each_irq_desc(i, desc) { raw_spin_lock_irq(&desc->lock); if (desc->istate & IRQS_AUTODETECT) { if (!(desc->istate & IRQS_WAITING)) { if (!nr_of_irqs) irq_found = i; nr_of_irqs++; } desc->istate &= ~IRQS_AUTODETECT; irq_shutdown(desc); } raw_spin_unlock_irq(&desc->lock); } mutex_unlock(&probing_active); if (nr_of_irqs > 1) irq_found = -irq_found; return irq_found; } EXPORT_SYMBOL(probe_irq_off);
gpl-2.0
lizhm82/devkit8000-kernel
arch/powerpc/sysdev/mpic_pasemi_msi.c
157
4533
/* * Copyright 2007, Olof Johansson, PA Semi * * Based on arch/powerpc/sysdev/mpic_u3msi.c: * * Copyright 2006, Segher Boessenkool, IBM Corporation. * Copyright 2006-2007, Michael Ellerman, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 of the * License. * */ #undef DEBUG #include <linux/irq.h> #include <linux/bootmem.h> #include <linux/msi.h> #include <asm/mpic.h> #include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/msi_bitmap.h> #include "mpic.h" /* Allocate 16 interrupts per device, to give an alignment of 16, * since that's the size of the grouping w.r.t. affinity. If someone * needs more than 32 MSI's down the road we'll have to rethink this, * but it should be OK for now. */ #define ALLOC_CHUNK 16 #define PASEMI_MSI_ADDR 0xfc080000 /* A bit ugly, can we get this from the pci_dev somehow? */ static struct mpic *msi_mpic; static void mpic_pasemi_msi_mask_irq(struct irq_data *data) { pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); mask_msi_irq(data); mpic_mask_irq(data->irq); } static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) { pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); mpic_unmask_irq(data->irq); unmask_msi_irq(data); } static struct irq_chip mpic_pasemi_msi_chip = { .irq_shutdown = mpic_pasemi_msi_mask_irq, .irq_mask = mpic_pasemi_msi_mask_irq, .irq_unmask = mpic_pasemi_msi_unmask_irq, .eoi = mpic_end_irq, .set_type = mpic_set_irq_type, .set_affinity = mpic_set_affinity, .name = "PASEMI-MSI", }; static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type) { if (type == PCI_CAP_ID_MSIX) pr_debug("pasemi_msi: MSI-X untested, trying anyway\n"); return 0; } static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) { struct msi_desc *entry; pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev); list_for_each_entry(entry, 
&pdev->msi_list, list) { if (entry->irq == NO_IRQ) continue; set_irq_msi(entry->irq, NULL); msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, virq_to_hw(entry->irq), ALLOC_CHUNK); irq_dispose_mapping(entry->irq); } return; } static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { unsigned int virq; struct msi_desc *entry; struct msi_msg msg; int hwirq; pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n", pdev, nvec, type); msg.address_hi = 0; msg.address_lo = PASEMI_MSI_ADDR; list_for_each_entry(entry, &pdev->msi_list, list) { /* Allocate 16 interrupts for now, since that's the grouping for * affinity. This can be changed later if it turns out 32 is too * few MSIs for someone, but restrictions will apply to how the * sources can be changed independently. */ hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, ALLOC_CHUNK); if (hwirq < 0) { pr_debug("pasemi_msi: failed allocating hwirq\n"); return hwirq; } virq = irq_create_mapping(msi_mpic->irqhost, hwirq); if (virq == NO_IRQ) { pr_debug("pasemi_msi: failed mapping hwirq 0x%x\n", hwirq); msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK); return -ENOSPC; } /* Vector on MSI is really an offset, the hardware adds * it to the value written at the magic address. So set * it to 0 to remain sane. 
*/ mpic_set_vector(virq, 0); set_irq_msi(virq, entry); set_irq_chip(virq, &mpic_pasemi_msi_chip); set_irq_type(virq, IRQ_TYPE_EDGE_RISING); pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \ "addr 0x%x\n", virq, hwirq, msg.address_lo); /* Likewise, the device writes [0...511] into the target * register to generate MSI [512...1023] */ msg.data = hwirq-0x200; write_msi_msg(virq, &msg); } return 0; } int mpic_pasemi_msi_init(struct mpic *mpic) { int rc; if (!mpic->irqhost->of_node || !of_device_is_compatible(mpic->irqhost->of_node, "pasemi,pwrficient-openpic")) return -ENODEV; rc = mpic_msi_init_allocator(mpic); if (rc) { pr_debug("pasemi_msi: Error allocating bitmap!\n"); return rc; } pr_debug("pasemi_msi: Registering PA Semi MPIC MSI callbacks\n"); msi_mpic = mpic; WARN_ON(ppc_md.setup_msi_irqs); ppc_md.setup_msi_irqs = pasemi_msi_setup_msi_irqs; ppc_md.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs; ppc_md.msi_check_device = pasemi_msi_check_device; return 0; }
gpl-2.0
pengshp/linux
drivers/virtio/virtio_balloon.c
413
17462
/* * Virtio balloon implementation, inspired by Dor Laor and Marcelo * Tosatti's implementations. * * Copyright 2008 Rusty Russell IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/virtio.h> #include <linux/virtio_balloon.h> #include <linux/swap.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/balloon_compaction.h> #include <linux/oom.h> #include <linux/wait.h> /* * Balloon device works in 4K page units. So each page is pointed to by * multiple balloon pages. All memory counters in this driver are in balloon * page units. */ #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT) #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256 #define OOM_VBALLOON_DEFAULT_PAGES 256 #define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80 static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES; module_param(oom_pages, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(oom_pages, "pages to free on OOM"); struct virtio_balloon { struct virtio_device *vdev; struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; /* Where the ballooning thread waits for config to change. */ wait_queue_head_t config_change; /* The thread servicing the balloon. */ struct task_struct *thread; /* Waiting for host to ack the pages we released. 
*/ wait_queue_head_t acked; /* Number of balloon pages we've told the Host we're not using. */ unsigned int num_pages; /* * The pages we've told the Host we're not using are enqueued * at vb_dev_info->pages list. * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE * to num_pages above. */ struct balloon_dev_info vb_dev_info; /* Synchronize access/update to this struct virtio_balloon elements */ struct mutex balloon_lock; /* The array of pfns we tell the Host about. */ unsigned int num_pfns; u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX]; /* Memory statistics */ int need_stats_update; struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR]; /* To register callback in oom notifier call chain */ struct notifier_block nb; }; static struct virtio_device_id id_table[] = { { VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID }, { 0 }, }; static u32 page_to_balloon_pfn(struct page *page) { unsigned long pfn = page_to_pfn(page); BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT); /* Convert pfn from Linux page size to balloon page size. */ return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE; } static struct page *balloon_pfn_to_page(u32 pfn) { BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE); return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE); } static void balloon_ack(struct virtqueue *vq) { struct virtio_balloon *vb = vq->vdev->priv; wake_up(&vb->acked); } static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) { struct scatterlist sg; unsigned int len; sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns); /* We should always be able to add one buffer to an empty queue. */ virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); virtqueue_kick(vq); /* When host has read buffer, this completes via balloon_ack */ wait_event(vb->acked, virtqueue_get_buf(vq, &len)); } static void set_page_pfns(u32 pfns[], struct page *page) { unsigned int i; /* Set balloon pfns pointing at this page. * Note that the first pfn points at start of the page. 
*/ for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++) pfns[i] = page_to_balloon_pfn(page) + i; } static void fill_balloon(struct virtio_balloon *vb, size_t num) { struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; /* We can only do one array worth at a time. */ num = min(num, ARRAY_SIZE(vb->pfns)); mutex_lock(&vb->balloon_lock); for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { struct page *page = balloon_page_enqueue(vb_dev_info); if (!page) { dev_info_ratelimited(&vb->vdev->dev, "Out of puff! Can't get %u pages\n", VIRTIO_BALLOON_PAGES_PER_PAGE); /* Sleep for at least 1/5 of a second before retry. */ msleep(200); break; } set_page_pfns(vb->pfns + vb->num_pfns, page); vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE; adjust_managed_page_count(page, -1); } /* Did we get any? */ if (vb->num_pfns != 0) tell_host(vb, vb->inflate_vq); mutex_unlock(&vb->balloon_lock); } static void release_pages_by_pfn(const u32 pfns[], unsigned int num) { unsigned int i; /* Find pfns pointing at start of each page, get pages and free them. */ for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { struct page *page = balloon_pfn_to_page(pfns[i]); adjust_managed_page_count(page, 1); put_page(page); /* balloon reference */ } } static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) { unsigned num_freed_pages; struct page *page; struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; /* We can only do one array worth at a time. 
*/ num = min(num, ARRAY_SIZE(vb->pfns)); mutex_lock(&vb->balloon_lock); for (vb->num_pfns = 0; vb->num_pfns < num; vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { page = balloon_page_dequeue(vb_dev_info); if (!page) break; set_page_pfns(vb->pfns + vb->num_pfns, page); vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; } num_freed_pages = vb->num_pfns; /* * Note that if * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST); * is true, we *have* to do it in this order */ if (vb->num_pfns != 0) tell_host(vb, vb->deflate_vq); mutex_unlock(&vb->balloon_lock); release_pages_by_pfn(vb->pfns, vb->num_pfns); return num_freed_pages; } static inline void update_stat(struct virtio_balloon *vb, int idx, u16 tag, u64 val) { BUG_ON(idx >= VIRTIO_BALLOON_S_NR); vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag); vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val); } #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) static void update_balloon_stats(struct virtio_balloon *vb) { unsigned long events[NR_VM_EVENT_ITEMS]; struct sysinfo i; int idx = 0; all_vm_events(events); si_meminfo(&i); update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, pages_to_bytes(events[PSWPIN])); update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, pages_to_bytes(events[PSWPOUT])); update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, pages_to_bytes(i.freeram)); update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, pages_to_bytes(i.totalram)); } /* * While most virtqueues communicate guest-initiated requests to the hypervisor, * the stats queue operates in reverse. The driver initializes the virtqueue * with a single buffer. From that point forward, all conversations consist of * a hypervisor request (a call to this function) which directs us to refill * the virtqueue with a fresh stats buffer. 
Since stats collection can sleep, * we notify our kthread which does the actual work via stats_handle_request(). */ static void stats_request(struct virtqueue *vq) { struct virtio_balloon *vb = vq->vdev->priv; vb->need_stats_update = 1; wake_up(&vb->config_change); } static void stats_handle_request(struct virtio_balloon *vb) { struct virtqueue *vq; struct scatterlist sg; unsigned int len; vb->need_stats_update = 0; update_balloon_stats(vb); vq = vb->stats_vq; if (!virtqueue_get_buf(vq, &len)) return; sg_init_one(&sg, vb->stats, sizeof(vb->stats)); virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); virtqueue_kick(vq); } static void virtballoon_changed(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; wake_up(&vb->config_change); } static inline s64 towards_target(struct virtio_balloon *vb) { s64 target; u32 num_pages; virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &num_pages); /* Legacy balloon config space is LE, unlike all other devices. */ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) num_pages = le32_to_cpu((__force __le32)num_pages); target = num_pages; return target - vb->num_pages; } static void update_balloon_size(struct virtio_balloon *vb) { u32 actual = vb->num_pages; /* Legacy balloon config space is LE, unlike all other devices. */ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) actual = (__force u32)cpu_to_le32(actual); virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual, &actual); } /* * virtballoon_oom_notify - release pages when system is under severe * memory pressure (called from out_of_memory()) * @self : notifier block struct * @dummy: not used * @parm : returned - number of freed pages * * The balancing of memory by use of the virtio balloon should not cause * the termination of processes while there are pages in the balloon. * If virtio balloon manages to release some memory, it will make the * system return and retry the allocation that forced the OOM killer * to run. 
*/ static int virtballoon_oom_notify(struct notifier_block *self, unsigned long dummy, void *parm) { struct virtio_balloon *vb; unsigned long *freed; unsigned num_freed_pages; vb = container_of(self, struct virtio_balloon, nb); if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) return NOTIFY_OK; freed = parm; num_freed_pages = leak_balloon(vb, oom_pages); update_balloon_size(vb); *freed += num_freed_pages; return NOTIFY_OK; } static int balloon(void *_vballoon) { struct virtio_balloon *vb = _vballoon; DEFINE_WAIT_FUNC(wait, woken_wake_function); set_freezable(); while (!kthread_should_stop()) { s64 diff; try_to_freeze(); add_wait_queue(&vb->config_change, &wait); for (;;) { if ((diff = towards_target(vb)) != 0 || vb->need_stats_update || kthread_should_stop() || freezing(current)) break; wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(&vb->config_change, &wait); if (vb->need_stats_update) stats_handle_request(vb); if (diff > 0) fill_balloon(vb, diff); else if (diff < 0) leak_balloon(vb, -diff); update_balloon_size(vb); /* * For large balloon changes, we could spend a lot of time * and always have work to do. Be nice if preempt disabled. */ cond_resched(); } return 0; } static int init_vqs(struct virtio_balloon *vb) { struct virtqueue *vqs[3]; vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request }; const char *names[] = { "inflate", "deflate", "stats" }; int err, nvqs; /* * We expect two virtqueues: inflate and deflate, and * optionally stat. */ nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2; err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names); if (err) return err; vb->inflate_vq = vqs[0]; vb->deflate_vq = vqs[1]; if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { struct scatterlist sg; vb->stats_vq = vqs[2]; /* * Prime this virtqueue with one buffer so the hypervisor can * use it to signal us later (it can't be broken yet!). 
*/ sg_init_one(&sg, vb->stats, sizeof vb->stats); if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) < 0) BUG(); virtqueue_kick(vb->stats_vq); } return 0; } #ifdef CONFIG_BALLOON_COMPACTION /* * virtballoon_migratepage - perform the balloon page migration on behalf of * a compation thread. (called under page lock) * @vb_dev_info: the balloon device * @newpage: page that will replace the isolated page after migration finishes. * @page : the isolated (old) page that is about to be migrated to newpage. * @mode : compaction mode -- not used for balloon page migration. * * After a ballooned page gets isolated by compaction procedures, this is the * function that performs the page migration on behalf of a compaction thread * The page migration for virtio balloon is done in a simple swap fashion which * follows these two macro steps: * 1) insert newpage into vb->pages list and update the host about it; * 2) update the host about the old page removed from vb->pages list; * * This function preforms the balloon page migration task. * Called through balloon_mapping->a_ops->migratepage */ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, struct page *newpage, struct page *page, enum migrate_mode mode) { struct virtio_balloon *vb = container_of(vb_dev_info, struct virtio_balloon, vb_dev_info); unsigned long flags; /* * In order to avoid lock contention while migrating pages concurrently * to leak_balloon() or fill_balloon() we just give up the balloon_lock * this turn, as it is easier to retry the page migration later. * This also prevents fill_balloon() getting stuck into a mutex * recursion in the case it ends up triggering memory compaction * while it is attempting to inflate the ballon. 
*/ if (!mutex_trylock(&vb->balloon_lock)) return -EAGAIN; get_page(newpage); /* balloon reference */ /* balloon's page migration 1st step -- inflate "newpage" */ spin_lock_irqsave(&vb_dev_info->pages_lock, flags); balloon_page_insert(vb_dev_info, newpage); vb_dev_info->isolated_pages--; __count_vm_event(BALLOON_MIGRATE); spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb->pfns, newpage); tell_host(vb, vb->inflate_vq); /* balloon's page migration 2nd step -- deflate "page" */ balloon_page_delete(page); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb->pfns, page); tell_host(vb, vb->deflate_vq); mutex_unlock(&vb->balloon_lock); put_page(page); /* balloon reference */ return MIGRATEPAGE_SUCCESS; } #endif /* CONFIG_BALLOON_COMPACTION */ static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; int err; if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); return -EINVAL; } vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); if (!vb) { err = -ENOMEM; goto out; } vb->num_pages = 0; mutex_init(&vb->balloon_lock); init_waitqueue_head(&vb->config_change); init_waitqueue_head(&vb->acked); vb->vdev = vdev; vb->need_stats_update = 0; balloon_devinfo_init(&vb->vb_dev_info); #ifdef CONFIG_BALLOON_COMPACTION vb->vb_dev_info.migratepage = virtballoon_migratepage; #endif err = init_vqs(vb); if (err) goto out_free_vb; vb->nb.notifier_call = virtballoon_oom_notify; vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY; err = register_oom_notifier(&vb->nb); if (err < 0) goto out_oom_notify; virtio_device_ready(vdev); vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { err = PTR_ERR(vb->thread); goto out_del_vqs; } return 0; out_del_vqs: unregister_oom_notifier(&vb->nb); out_oom_notify: vdev->config->del_vqs(vdev); out_free_vb: kfree(vb); out: return err; } static void remove_common(struct virtio_balloon *vb) { /* 
There might be pages left in the balloon: free them. */ while (vb->num_pages) leak_balloon(vb, vb->num_pages); update_balloon_size(vb); /* Now we reset the device so we can clean up the queues. */ vb->vdev->config->reset(vb->vdev); vb->vdev->config->del_vqs(vb->vdev); } static void virtballoon_remove(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; unregister_oom_notifier(&vb->nb); kthread_stop(vb->thread); remove_common(vb); kfree(vb); } #ifdef CONFIG_PM_SLEEP static int virtballoon_freeze(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; /* * The kthread is already frozen by the PM core before this * function is called. */ remove_common(vb); return 0; } static int virtballoon_restore(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; int ret; ret = init_vqs(vdev->priv); if (ret) return ret; virtio_device_ready(vdev); fill_balloon(vb, towards_target(vb)); update_balloon_size(vb); return 0; } #endif static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST, VIRTIO_BALLOON_F_STATS_VQ, VIRTIO_BALLOON_F_DEFLATE_ON_OOM, }; static struct virtio_driver virtio_balloon_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtballoon_probe, .remove = virtballoon_remove, .config_changed = virtballoon_changed, #ifdef CONFIG_PM_SLEEP .freeze = virtballoon_freeze, .restore = virtballoon_restore, #endif }; module_virtio_driver(virtio_balloon_driver); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio balloon driver"); MODULE_LICENSE("GPL");
gpl-2.0
tilaksidduram/Stock_kernel
arch/arm/mach-exynos/board-p8ltevzw-modems.c
413
21130
/* linux/arch/arm/mach-xxxx/board-p8vzw-modems.c * Copyright (C) 2010 Samsung Electronics. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/regulator/consumer.h> #include <linux/err.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/clk.h> /* inlcude platform specific file */ #include <linux/platform_data/modem_na.h> #include <mach/sec_modem.h> #include <mach/gpio.h> #include <mach/gpio-exynos4.h> #include <plat/gpio-cfg.h> #include <mach/regs-mem.h> #include <plat/regs-srom.h> #include <plat/devs.h> #include <plat/ehci.h> #define IDPRAM_SIZE 0x4000 #define IDPRAM_PHY_START 0x13A00000 #define IDPRAM_PHY_END (IDPRAM_PHY_START + IDPRAM_SIZE) #define MAGIC_DMDL 0x4445444C /*S5PV210 Interanl Dpram Special Function Register*/ #define IDPRAM_MIFCON_INT2APEN (1<<2) #define IDPRAM_MIFCON_INT2MSMEN (1<<3) #define IDPRAM_MIFCON_DMATXREQEN_0 (1<<16) #define IDPRAM_MIFCON_DMATXREQEN_1 (1<<17) #define IDPRAM_MIFCON_DMARXREQEN_0 (1<<18) #define IDPRAM_MIFCON_DMARXREQEN_1 (1<<19) #define IDPRAM_MIFCON_FIXBIT (1<<20) #define IDPRAM_MIFPCON_ADM_MODE (1<<6) /* mux / demux mode */ #define IDPRAM_DMA_ADR_MASK 0x3FFF #define IDPRAM_DMA_TX_ADR_0 /* shift 0 */ #define IDPRAM_DMA_TX_ADR_1 /* shift 16 */ #define IDPRAM_DMA_RX_ADR_0 /* shift 0 */ #define IDPRAM_DMA_RX_ADR_1 /* shift 16 */ #define IDPRAM_SFR_PHYSICAL_ADDR 0x13A08000 #define IDPRAM_SFR_SIZE 0x1C #define 
IDPRAM_ADDRESS_DEMUX static int __init init_modem(void); static int p8_lte_ota_reset(void); struct idpram_sfr_reg { unsigned int2ap; unsigned int2msm; unsigned mifcon; unsigned mifpcon; unsigned msmintclr; unsigned dma_tx_adr; unsigned dma_rx_adr; }; /*S5PV210 Internal Dpram GPIO table*/ struct idpram_gpio_data { unsigned num; unsigned cfg; unsigned pud; unsigned val; }; static volatile void __iomem *s5pv310_dpram_sfr_va; static struct idpram_gpio_data idpram_gpio_address[] = { #ifdef IDPRAM_ADDRESS_DEMUX { .num = EXYNOS4210_GPE1(0), /* MSM_ADDR 0 -12 */ .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE1(1), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE1(2), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE1(3), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE1(4), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE1(5), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE1(6), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE1(7), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE2(0), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE2(1), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE2(2), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE2(3), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE2(4), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE2(5), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, #endif }; static struct idpram_gpio_data idpram_gpio_data[] = { { .num = EXYNOS4210_GPE3(0), /* MSM_DATA 0 - 15 */ .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE3(1), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = 
EXYNOS4210_GPE3(2), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE3(3), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE3(4), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE3(5), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE3(6), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE3(7), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE4(0), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE4(1), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE4(2), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE4(3), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE4(4), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE4(5), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE4(6), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE4(7), .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, }; static struct idpram_gpio_data idpram_gpio_init_control[] = { { .num = EXYNOS4210_GPE0(1), /* MDM_CSn */ .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE0(0), /* MDM_WEn */ .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE0(2), /* MDM_Rn */ .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, { .num = EXYNOS4210_GPE0(3), /* MDM_IRQn */ .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_UP, }, #ifndef IDPRAM_ADDRESS_DEMUX { .num = EXYNOS4210_GPE0(4), /* MDM_ADVN */ .cfg = S3C_GPIO_SFN(0x2), .pud = S3C_GPIO_PULL_NONE, }, #endif }; static void idpram_gpio_cfg(struct idpram_gpio_data *gpio) { printk(KERN_DEBUG "MIF: idpram set gpio num=%d, cfg=0x%x, pud=%d, val=%d\n", gpio->num, gpio->cfg, gpio->pud, gpio->val); s3c_gpio_cfgpin(gpio->num, 
gpio->cfg); s3c_gpio_setpull(gpio->num, gpio->pud); if (gpio->val) gpio_set_value(gpio->num, gpio->val); } static void idpram_gpio_init(void) { int i; #ifdef IDPRAM_ADDRESS_DEMUX for (i = 0; i < ARRAY_SIZE(idpram_gpio_address); i++) idpram_gpio_cfg(&idpram_gpio_address[i]); #endif for (i = 0; i < ARRAY_SIZE(idpram_gpio_data); i++) idpram_gpio_cfg(&idpram_gpio_data[i]); for (i = 0; i < ARRAY_SIZE(idpram_gpio_init_control); i++) idpram_gpio_cfg(&idpram_gpio_init_control[i]); } static void idpram_sfr_init(void) { volatile struct idpram_sfr_reg __iomem *sfr = s5pv310_dpram_sfr_va; sfr->mifcon = (IDPRAM_MIFCON_FIXBIT | IDPRAM_MIFCON_INT2APEN | IDPRAM_MIFCON_INT2MSMEN); #ifndef IDPRAM_ADDRESS_DEMUX sfr->mifpcon = (IDPRAM_MIFPCON_ADM_MODE); #endif } static void idpram_init(void) { struct clk *clk; /* enable internal dpram clock */ clk = clk_get(NULL, "modem"); if (!clk) pr_err("MIF: idpram failed to get clock %s\n", __func__); clk_enable(clk); if (!s5pv310_dpram_sfr_va) { s5pv310_dpram_sfr_va = (struct idpram_sfr_reg __iomem *) ioremap_nocache(IDPRAM_SFR_PHYSICAL_ADDR, IDPRAM_SFR_SIZE); if (!s5pv310_dpram_sfr_va) { printk(KERN_ERR "MIF: idpram_sfr_base io-remap fail\n"); /*iounmap(idpram_base);*/ } } idpram_sfr_init(); } static void idpram_clr_intr(void) { volatile struct idpram_sfr_reg __iomem *sfr = s5pv310_dpram_sfr_va; sfr->msmintclr = 0xFF; } /* magic_code + access_enable + fmt_tx_head + fmt_tx_tail + fmt_tx_buff + raw_tx_head + raw_tx_tail + raw_tx_buff + fmt_rx_head + fmt_rx_tail + fmt_rx_buff + raw_rx_head + raw_rx_tail + raw_rx_buff + padding + mbx_cp2ap + mbx_ap2cp = 2 + 2 + 2 + 2 + 2044 + 2 + 2 + 6128 + 2 + 2 + 2044 + 2 + 2 + 6128 + 16 + 2 + 2 = 16384 */ #define CBP_DP_FMT_TX_BUFF_SZ 2044 #define CBP_DP_RAW_TX_BUFF_SZ 6128 #define CBP_DP_FMT_RX_BUFF_SZ 2044 #define CBP_DP_RAW_RX_BUFF_SZ 6128 #define MAX_CBP_IDPRAM_IPC_DEV (IPC_RAW + 1) /* FMT, RAW */ /* ** CDMA target platform data */ static struct modem_io_t cdma_io_devices[] = { [0] = { .name = "multipdp", 
.id = 0x1, .format = IPC_MULTI_RAW, .io_type = IODEV_DUMMY, .link = LINKDEV_DPRAM, }, [1] = { .name = "cdma_ipc0", .id = 0x1, .format = IPC_FMT, .io_type = IODEV_MISC, .link = LINKDEV_DPRAM, }, [2] = { .name = "cdma_rfs0", .id = 0x33, /* 0x13 (ch.id) | 0x20 (mask) */ .format = IPC_RAW, .io_type = IODEV_MISC, .link = LINKDEV_DPRAM, }, [3] = { .name = "cdma_boot0", .id = 0x1, .format = IPC_BOOT, .io_type = IODEV_MISC, .link = LINKDEV_DPRAM, }, [4] = { .name = "cdma_rmnet0", .id = 0x2A, .format = IPC_RAW, .io_type = IODEV_NET, .link = LINKDEV_DPRAM, }, [5] = { .name = "cdma_rmnet1", .id = 0x2B, .format = IPC_RAW, .io_type = IODEV_NET, .link = LINKDEV_DPRAM, }, [6] = { .name = "cdma_rmnet2", .id = 0x2C, .format = IPC_RAW, .io_type = IODEV_NET, .link = LINKDEV_DPRAM, }, [7] = { .name = "cdma_rmnet3", .id = 0x2D, .format = IPC_RAW, .io_type = IODEV_NET, .link = LINKDEV_DPRAM, }, [8] = { .name = "cdma_rmnet4", .id = 0x27, .format = IPC_RAW, .io_type = IODEV_NET, .link = LINKDEV_DPRAM, }, [9] = { .name = "cdma_rmnet5", /* DM Port IO device */ .id = 0x3A, .format = IPC_RAW, .io_type = IODEV_MISC, .link = LINKDEV_DPRAM, }, [10] = { .name = "cdma_rmnet6", /* AT CMD IO device */ .id = 0x31, .format = IPC_RAW, .io_type = IODEV_MISC, .link = LINKDEV_DPRAM, }, [11] = { .name = "cdma_ramdump0", .id = 0x1, .format = IPC_RAMDUMP, .io_type = IODEV_MISC, .link = LINKDEV_DPRAM, }, [12] = { .name = "cdma_cplog", /* cp log io-device */ .id = 0x3D, .format = IPC_RAW, .io_type = IODEV_MISC, .link = LINKDEV_DPRAM, }, [13] = { .name = "cdma_router", /* AT commands */ .id = 0x39, .format = IPC_RAW, .io_type = IODEV_MISC, .link = LINKDEV_DPRAM, }, }; static struct modem_data cdma_modem_data = { .name = "cbp7.1", .gpio_cp_on = GPIO_PHONE_ON, .gpio_cp_off = GPIO_VIA_PS_HOLD_OFF, .gpio_cp_reset = GPIO_CP_RST, .gpio_pda_active = GPIO_PDA_ACTIVE, .gpio_phone_active = GPIO_PHONE_ACTIVE, .gpio_ap_wakeup = GPIO_CP_AP_DPRAM_INT, .gpio_mbx_intr = GPIO_VIA_DPRAM_INT_N, .modem_net = CDMA_NETWORK, 
.modem_type = VIA_CBP71, .link_type = LINKDEV_DPRAM, .num_iodevs = ARRAY_SIZE(cdma_io_devices), .iodevs = cdma_io_devices, .clear_intr = idpram_clr_intr, .ota_reset = p8_lte_ota_reset, .sfr_init = idpram_sfr_init, .align = 1, /* Adjust the IPC raw and Multi Raw HDLC buffer offsets */ }; static struct resource cdma_modem_res[] = { [0] = { .name = "dpram", .start = IDPRAM_PHY_START, .end = IDPRAM_PHY_END, .flags = IORESOURCE_MEM, }, [1] = { .name = "dpram_irq", .start = IRQ_MODEM_IF, .end = IRQ_MODEM_IF, .flags = IORESOURCE_IRQ, }, }; static struct platform_device cdma_modem = { .name = "modem_if", .id = 1, .num_resources = ARRAY_SIZE(cdma_modem_res), .resource = cdma_modem_res, .dev = { .platform_data = &cdma_modem_data, }, }; static int p8_lte_ota_reset(void) { unsigned gpio_cp_rst = cdma_modem_data.gpio_cp_reset; unsigned gpio_cp_on = cdma_modem_data.gpio_cp_on; unsigned int *magickey_va; int i; pr_err("[MODEM_IF] %s Modem OTA reset\n", __func__); magickey_va = ioremap_nocache(IDPRAM_PHY_START, sizeof(unsigned int)); if (!magickey_va) { pr_err("%s: ioremap fail\n", __func__); return -ENOMEM; } gpio_set_value(gpio_cp_on, 1); msleep(100); gpio_set_value(gpio_cp_rst, 0); for (i = 0; i < 3; i++) { *magickey_va = MAGIC_DMDL; if (*magickey_va == MAGIC_DMDL) { pr_err("magic key is ok!"); break; } } msleep(500); gpio_set_value(gpio_cp_rst, 1); for (i = 0; i < 3; i++) { *magickey_va = MAGIC_DMDL; if (*magickey_va == MAGIC_DMDL) { pr_err("magic key is ok!"); break; } } iounmap(magickey_va); return 0; } static void config_cdma_modem_gpio(void) { int err; unsigned gpio_cp_on = cdma_modem_data.gpio_cp_on; unsigned gpio_cp_off = cdma_modem_data.gpio_cp_off; unsigned gpio_cp_rst = cdma_modem_data.gpio_cp_reset; unsigned gpio_pda_active = cdma_modem_data.gpio_pda_active; unsigned gpio_phone_active = cdma_modem_data.gpio_phone_active; unsigned gpio_ap_wakeup = cdma_modem_data.gpio_ap_wakeup; pr_info("MIF: <%s>\n", __func__); if (gpio_cp_on) { err = gpio_request(gpio_cp_on, 
"VIACP_ON"); if (err) pr_err("fail to request gpio %s\n", "VIACP_ON"); else gpio_direction_output(gpio_cp_on, 0); } if (gpio_cp_rst) { err = gpio_request(gpio_cp_rst, "VAICP_RST"); if (err) pr_err("fail to request gpio %s\n", "VIACP_RST"); else gpio_direction_output(gpio_cp_rst, 0); } if (gpio_cp_off) { err = gpio_request(gpio_cp_off, "VAICP_OFF"); if (err) pr_err("fail to request gpio %s\n", "VIACP_OFF"); else gpio_direction_output(gpio_cp_off, 1); } if (gpio_pda_active) { err = gpio_request(gpio_pda_active, "PDA_ACTIVE"); if (err) pr_err("fail to request gpio %s\n", "PDA_ACTIVE"); else gpio_direction_output(gpio_pda_active, 0); } if (gpio_phone_active) { err = gpio_request(gpio_phone_active, "PHONE_ACTIVE"); if (err) { pr_err("fail to request gpio %s\n", "PHONE_ACTIVE"); } else { s3c_gpio_cfgpin(gpio_phone_active, S3C_GPIO_SFN(0xF)); s3c_gpio_setpull(gpio_phone_active, S3C_GPIO_PULL_NONE); } } if (gpio_ap_wakeup) { err = gpio_request(GPIO_CP_AP_DPRAM_INT, "HOST_WAKEUP"); if (err) { pr_err("fail to request gpio %s\n", "HOST_WAKEUP"); } else { s3c_gpio_cfgpin(GPIO_CP_AP_DPRAM_INT, \ S3C_GPIO_SFN(0xF)); s3c_gpio_setpull(GPIO_CP_AP_DPRAM_INT, \ S3C_GPIO_PULL_NONE); } } } /* lte target platform data */ static struct modem_io_t lte_io_devices[] = { [0] = { .name = "lte_ipc0", .id = 0x1, .format = IPC_FMT, .io_type = IODEV_MISC, .link = LINKDEV_USB, }, [1] = { .name = "lte_rmnet0", .id = 0x2A, .format = IPC_RAW, .io_type = IODEV_NET, .link = LINKDEV_USB, }, [2] = { .name = "lte_rfs0", .id = 0x0, .format = IPC_RFS, .io_type = IODEV_MISC, .link = LINKDEV_USB, }, [3] = { .name = "lte_boot0", .id = 0x0, .format = IPC_BOOT, .io_type = IODEV_MISC, .link = LINKDEV_USB, }, [4] = { .name = "lte_rmnet1", .id = 0x2B, .format = IPC_RAW, .io_type = IODEV_NET, .link = LINKDEV_USB, }, [5] = { .name = "lte_rmnet2", .id = 0x2C, .format = IPC_RAW, .io_type = IODEV_NET, .link = LINKDEV_USB, }, [6] = { .name = "lte_rmnet3", .id = 0x2D, .format = IPC_RAW, .io_type = IODEV_NET, .link = 
LINKDEV_USB, }, [7] = { .name = "lte_multipdp", .id = 0x1, .format = IPC_MULTI_RAW, .io_type = IODEV_DUMMY, .link = LINKDEV_USB, }, [8] = { .name = "lte_rmnet4", /* DM Port io-device */ .id = 0x3F, .format = IPC_RAW, .io_type = IODEV_MISC, .link = LINKDEV_USB, }, [9] = { .name = "lte_ramdump0", .id = 0x0, .format = IPC_RAMDUMP, .io_type = IODEV_MISC, .link = LINKDEV_USB, }, }; static struct modemlink_pm_data lte_link_pm_data = { .name = "lte_link_pm", .gpio_link_enable = 0, .gpio_link_active = GPIO_AP2LTE_STATUS, .gpio_link_hostwake = GPIO_LTE2AP_WAKEUP, .gpio_link_slavewake = GPIO_AP2LTE_WAKEUP, /* .port_enable = host_port_enable, .freqlock = ATOMIC_INIT(0), .cpufreq_lock = exynos_cpu_frequency_lock, .cpufreq_unlock = exynos_cpu_frequency_unlock, */ }; static struct modem_data lte_modem_data = { .name = "cmc220", .gpio_cp_on = GPIO_220_PMIC_PWRON, .gpio_reset_req_n = 0, .gpio_cp_reset = GPIO_CMC_RST, .gpio_pda_active = 0,/*NOT YET CONNECTED*/ .gpio_phone_active = GPIO_LTE_ACTIVE, .gpio_cp_dump_int = GPIO_LTE_ACTIVE,/*TO BE CHECKED*/ .gpio_cp_warm_reset = 0, /*.gpio_cp_off = GPIO_220_PMIC_PWRHOLD_OFF,*/ #ifdef CONFIG_LTE_MODEM_CMC220 .gpio_cp_off = GPIO_LTE_PS_HOLD_OFF, .gpio_slave_wakeup = GPIO_AP2LTE_WAKEUP, .gpio_host_wakeup = GPIO_LTE2AP_WAKEUP, .gpio_host_active = GPIO_AP2LTE_STATUS, #endif .modem_type = SEC_CMC220, .link_type = LINKDEV_USB, .modem_net = LTE_NETWORK, .num_iodevs = ARRAY_SIZE(lte_io_devices), .iodevs = lte_io_devices, .link_pm_data = &lte_link_pm_data, }; static struct resource lte_modem_res[] = { [0] = { .name = "lte_phone_active", /* phone active irq */ .start = IRQ_LTE_ACTIVE, .end = IRQ_LTE_ACTIVE, .flags = IORESOURCE_IRQ, }, [1] = { .name = "lte_host_wakeup", /* host wakeup irq */ .start = IRQ_LTE2AP_WAKEUP, .end = IRQ_LTE2AP_WAKEUP, .flags = IORESOURCE_IRQ, }, }; static struct platform_device lte_modem_wake = { .name = "modem_lte_wake", .id = -1, }; static struct platform_device lte_modem = { .name = "modem_if", .id = 2, .num_resources = 
ARRAY_SIZE(lte_modem_res), .resource = lte_modem_res, .dev = { .platform_data = &lte_modem_data, }, }; static void lte_modem_cfg_gpio(void) { unsigned gpio_cp_on = lte_modem_data.gpio_cp_on; unsigned gpio_cp_rst = lte_modem_data.gpio_cp_reset; unsigned gpio_phone_active = lte_modem_data.gpio_phone_active; #ifdef CONFIG_LTE_MODEM_CMC220 unsigned gpio_cp_off = lte_modem_data.gpio_cp_off; unsigned gpio_slave_wakeup = lte_modem_data.gpio_slave_wakeup; unsigned gpio_host_wakeup = lte_modem_data.gpio_host_wakeup; unsigned gpio_host_active = lte_modem_data.gpio_host_active; #endif if (gpio_cp_on) { gpio_request(gpio_cp_on, "LTE_ON"); gpio_direction_output(gpio_cp_on, 0); s3c_gpio_setpull(gpio_cp_on, S3C_GPIO_PULL_NONE); } if (gpio_cp_rst) { gpio_request(gpio_cp_rst, "LTE_RST"); gpio_direction_output(gpio_cp_rst, 0); s3c_gpio_setpull(gpio_cp_rst, S3C_GPIO_PULL_NONE); } if (gpio_phone_active) { gpio_request(gpio_phone_active, "LTE_ACTIVE"); gpio_direction_input(gpio_phone_active); s3c_gpio_setpull(gpio_phone_active, S3C_GPIO_PULL_DOWN); s3c_gpio_cfgpin(gpio_phone_active, S3C_GPIO_SFN(0xF)); } #ifdef CONFIG_LTE_MODEM_CMC220 if (gpio_cp_off) { gpio_request(gpio_cp_off, "LTE_OFF"); gpio_direction_output(gpio_cp_off, 1); s3c_gpio_setpull(gpio_cp_off, S3C_GPIO_PULL_NONE); } if (gpio_slave_wakeup) { gpio_request(gpio_slave_wakeup, "LTE_SLAVE_WAKEUP"); gpio_direction_output(gpio_slave_wakeup, 0); s3c_gpio_setpull(gpio_slave_wakeup, S3C_GPIO_PULL_NONE); } if (gpio_host_wakeup) { gpio_request(gpio_host_wakeup, "LTE_HOST_WAKEUP"); gpio_direction_input(gpio_host_wakeup); s3c_gpio_setpull(gpio_host_wakeup, S3C_GPIO_PULL_DOWN); s3c_gpio_cfgpin(gpio_host_wakeup, S3C_GPIO_SFN(0xF)); } if (gpio_host_active) { gpio_request(gpio_host_active, "LTE_HOST_ACTIVE"); gpio_direction_output(gpio_host_active, 1); s3c_gpio_setpull(gpio_host_active, S3C_GPIO_PULL_NONE); } #endif } void set_host_states(struct platform_device *pdev, int type) { int spin = 20; if (!type) { 
gpio_direction_output(lte_modem_data.gpio_host_active, type); return; } if (gpio_get_value(lte_modem_data.gpio_host_wakeup)) { gpio_direction_output(lte_modem_data.gpio_host_active, type); mdelay(10); while (spin--) { if (!gpio_get_value(lte_modem_data.gpio_host_wakeup)) break; mdelay(10); } } else { pr_err("mif: host wakeup is low\n"); } } int get_cp_active_state(void) { return gpio_get_value(lte_modem_data.gpio_phone_active); } void set_hsic_lpa_states(int states) { int val = gpio_get_value(lte_modem_data.gpio_cp_reset); pr_info("mif: %s: states = %d\n", __func__, states); if (val) { switch (states) { case STATE_HSIC_LPA_ENTER: /* gpio_set_value(lte_modem_data.gpio_link_active, 0); gpio_set_value(umts_modem_data.gpio_pda_active, 0); pr_info(LOG_TAG "set hsic lpa enter: " "active state (%d)" ", pda active (%d)\n", gpio_get_value( lte_modem_data.gpio_link_active), gpio_get_value(umts_modem_data.gpio_pda_active) ); */ break; case STATE_HSIC_LPA_WAKE: /* gpio_set_value(umts_modem_data.gpio_pda_active, 1); pr_info(LOG_TAG "set hsic lpa wake: " "pda active (%d)\n", gpio_get_value(umts_modem_data.gpio_pda_active) ); */ break; case STATE_HSIC_LPA_PHY_INIT: /* gpio_set_value(umts_modem_data.gpio_pda_active, 1); gpio_set_value(lte_modem_data.gpio_link_slavewake, 1); pr_info(LOG_TAG "set hsic lpa phy init: " "slave wake-up (%d)\n", gpio_get_value( lte_modem_data.gpio_link_slavewake) ); */ break; } } } /* lte_modem_wake must be registered before the ehci driver */ void __init modem_p8ltevzw_init(void) { lte_modem_wake.dev.platform_data = &lte_modem_data; platform_device_register(&lte_modem_wake); } static int __init init_modem(void) { pr_err("[MDM] <%s>\n", __func__); /* interanl dpram gpio configure */ idpram_gpio_init(); idpram_init(); config_cdma_modem_gpio(); platform_device_register(&cdma_modem); /* lte gpios configuration */ lte_modem_cfg_gpio(); platform_device_register(&lte_modem); return 0; } late_initcall(init_modem);
gpl-2.0
Hardslog/f2fs
fs/ocfs2/dlm/dlmunlock.c
1437
19468
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmunlock.c * * underlying calls for unlocking locks * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/spinlock.h> #include <linux/delay.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #define MLOG_MASK_PREFIX ML_DLM #include "cluster/masklog.h" #define DLM_UNLOCK_FREE_LOCK 0x00000001 #define DLM_UNLOCK_CALL_AST 0x00000002 #define DLM_UNLOCK_REMOVE_LOCK 0x00000004 #define DLM_UNLOCK_REGRANT_LOCK 0x00000008 #define DLM_UNLOCK_CLEAR_CONVERT_TYPE 0x00000010 static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int *actions); static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int *actions); static enum dlm_status dlm_send_remote_unlock_request(struct 
dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, u8 owner); /* * according to the spec: * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf * * flags & LKM_CANCEL != 0: must be converting or blocked * flags & LKM_CANCEL == 0: must be granted * * So to unlock a converting lock, you must first cancel the * convert (passing LKM_CANCEL in flags), then call the unlock * again (with no LKM_CANCEL in flags). */ /* * locking: * caller needs: none * taken: res->spinlock and lock->spinlock taken and dropped * held on exit: none * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network * all callers should have taken an extra ref on lock coming in */ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, int *call_ast, int master_node) { enum dlm_status status; int actions = 0; int in_use; u8 owner; mlog(0, "master_node = %d, valblk = %d\n", master_node, flags & LKM_VALBLK); if (master_node) BUG_ON(res->owner != dlm->node_num); else BUG_ON(res->owner == dlm->node_num); spin_lock(&dlm->ast_lock); /* We want to be sure that we're not freeing a lock * that still has AST's pending... 
*/ in_use = !list_empty(&lock->ast_list); spin_unlock(&dlm->ast_lock); if (in_use && !(flags & LKM_CANCEL)) { mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock " "while waiting for an ast!", res->lockname.len, res->lockname.name); return DLM_BADPARAM; } spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_IN_PROGRESS) { if (master_node && !(flags & LKM_CANCEL)) { mlog(ML_ERROR, "lockres in progress!\n"); spin_unlock(&res->spinlock); return DLM_FORWARD; } /* ok for this to sleep if not in a network handler */ __dlm_wait_on_lockres(res); res->state |= DLM_LOCK_RES_IN_PROGRESS; } spin_lock(&lock->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { status = DLM_RECOVERING; goto leave; } if (res->state & DLM_LOCK_RES_MIGRATING) { status = DLM_MIGRATING; goto leave; } /* see above for what the spec says about * LKM_CANCEL and the lock queue state */ if (flags & LKM_CANCEL) status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions); else status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node)) goto leave; /* By now this has been masked out of cancel requests. */ if (flags & LKM_VALBLK) { /* make the final update to the lvb */ if (master_node) memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN); else flags |= LKM_PUT_LVB; /* let the send function * handle it. 
*/ } if (!master_node) { owner = res->owner; /* drop locks and send message */ if (flags & LKM_CANCEL) lock->cancel_pending = 1; else lock->unlock_pending = 1; spin_unlock(&lock->spinlock); spin_unlock(&res->spinlock); status = dlm_send_remote_unlock_request(dlm, res, lock, lksb, flags, owner); spin_lock(&res->spinlock); spin_lock(&lock->spinlock); /* if the master told us the lock was already granted, * let the ast handle all of these actions */ if (status == DLM_CANCELGRANT) { actions &= ~(DLM_UNLOCK_REMOVE_LOCK| DLM_UNLOCK_REGRANT_LOCK| DLM_UNLOCK_CLEAR_CONVERT_TYPE); } else if (status == DLM_RECOVERING || status == DLM_MIGRATING || status == DLM_FORWARD || status == DLM_NOLOCKMGR ) { /* must clear the actions because this unlock * is about to be retried. cannot free or do * any list manipulation. */ mlog(0, "%s:%.*s: clearing actions, %s\n", dlm->name, res->lockname.len, res->lockname.name, status==DLM_RECOVERING?"recovering": (status==DLM_MIGRATING?"migrating": (status == DLM_FORWARD ? "forward" : "nolockmanager"))); actions = 0; } if (flags & LKM_CANCEL) lock->cancel_pending = 0; else lock->unlock_pending = 0; } /* get an extra ref on lock. if we are just switching * lists here, we dont want the lock to go away. */ dlm_lock_get(lock); if (actions & DLM_UNLOCK_REMOVE_LOCK) { list_del_init(&lock->list); dlm_lock_put(lock); } if (actions & DLM_UNLOCK_REGRANT_LOCK) { dlm_lock_get(lock); list_add_tail(&lock->list, &res->granted); } if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) { mlog(0, "clearing convert_type at %smaster node\n", master_node ? 
"" : "non-"); lock->ml.convert_type = LKM_IVMODE; } /* remove the extra ref on lock */ dlm_lock_put(lock); leave: res->state &= ~DLM_LOCK_RES_IN_PROGRESS; if (!dlm_lock_on_list(&res->converting, lock)) BUG_ON(lock->ml.convert_type != LKM_IVMODE); else BUG_ON(lock->ml.convert_type == LKM_IVMODE); spin_unlock(&lock->spinlock); spin_unlock(&res->spinlock); wake_up(&res->wq); /* let the caller's final dlm_lock_put handle the actual kfree */ if (actions & DLM_UNLOCK_FREE_LOCK) { /* this should always be coupled with list removal */ BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK)); mlog(0, "lock %u:%llu should be gone now! refs=%d\n", dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), atomic_read(&lock->lock_refs.refcount)-1); dlm_lock_put(lock); } if (actions & DLM_UNLOCK_CALL_AST) *call_ast = 1; /* if cancel or unlock succeeded, lvb work is done */ if (status == DLM_NORMAL) lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); return status; } void dlm_commit_pending_unlock(struct dlm_lock_resource *res, struct dlm_lock *lock) { /* leave DLM_LKSB_PUT_LVB on the lksb so any final * update of the lvb will be sent to the new master */ list_del_init(&lock->list); } void dlm_commit_pending_cancel(struct dlm_lock_resource *res, struct dlm_lock *lock) { list_move_tail(&lock->list, &res->granted); lock->ml.convert_type = LKM_IVMODE; } static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, int *call_ast) { return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1); } static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, int *call_ast) { return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0); } /* * locking: * caller needs: none * taken: none * held on exit: none * returns: DLM_NORMAL, 
DLM_NOLOCKMGR, status from network */ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int flags, u8 owner) { struct dlm_unlock_lock unlock; int tmpret; enum dlm_status ret; int status = 0; struct kvec vec[2]; size_t veclen = 1; mlog(0, "%.*s\n", res->lockname.len, res->lockname.name); if (owner == dlm->node_num) { /* ended up trying to contact ourself. this means * that the lockres had been remote but became local * via a migration. just retry it, now as local */ mlog(0, "%s:%.*s: this node became the master due to a " "migration, re-evaluate now\n", dlm->name, res->lockname.len, res->lockname.name); return DLM_FORWARD; } memset(&unlock, 0, sizeof(unlock)); unlock.node_idx = dlm->node_num; unlock.flags = cpu_to_be32(flags); unlock.cookie = lock->ml.cookie; unlock.namelen = res->lockname.len; memcpy(unlock.name, res->lockname.name, unlock.namelen); vec[0].iov_len = sizeof(struct dlm_unlock_lock); vec[0].iov_base = &unlock; if (flags & LKM_PUT_LVB) { /* extra data to send if we are updating lvb */ vec[1].iov_len = DLM_LVB_LEN; vec[1].iov_base = lock->lksb->lvb; veclen++; } tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key, vec, veclen, owner, &status); if (tmpret >= 0) { // successfully sent and received if (status == DLM_FORWARD) mlog(0, "master was in-progress. retry\n"); ret = status; } else { mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " "node %u\n", tmpret, DLM_UNLOCK_LOCK_MSG, dlm->key, owner); if (dlm_is_host_down(tmpret)) { /* NOTE: this seems strange, but it is what we want. * when the master goes down during a cancel or * unlock, the recovery code completes the operation * as if the master had not died, then passes the * updated state to the recovery master. this thread * just needs to finish out the operation and call * the unlockast. 
*/ if (dlm_is_node_dead(dlm, owner)) ret = DLM_NORMAL; else ret = DLM_NOLOCKMGR; } else { /* something bad. this will BUG in ocfs2 */ ret = dlm_err_to_dlm_status(tmpret); } } return ret; } /* * locking: * caller needs: none * taken: takes and drops res->spinlock * held on exit: none * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID, * return value from dlmunlock_master */ int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf; struct dlm_lock_resource *res = NULL; struct dlm_lock *lock = NULL; enum dlm_status status = DLM_NORMAL; int found = 0, i; struct dlm_lockstatus *lksb = NULL; int ignore; u32 flags; struct list_head *queue; flags = be32_to_cpu(unlock->flags); if (flags & LKM_GET_LVB) { mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n"); return DLM_BADARGS; } if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) { mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL " "request!\n"); return DLM_BADARGS; } if (unlock->namelen > DLM_LOCKID_NAME_MAX) { mlog(ML_ERROR, "Invalid name length in unlock handler!\n"); return DLM_IVBUFLEN; } if (!dlm_grab(dlm)) return DLM_REJECTED; mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), "Domain %s not fully joined!\n", dlm->name); mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none"); res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen); if (!res) { /* We assume here that a no lock resource simply means * it was migrated away and destroyed before the other * node could detect it. 
*/ mlog(0, "returning DLM_FORWARD -- res no longer exists\n"); status = DLM_FORWARD; goto not_found; } queue=&res->granted; found = 0; spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { spin_unlock(&res->spinlock); mlog(0, "returning DLM_RECOVERING\n"); status = DLM_RECOVERING; goto leave; } if (res->state & DLM_LOCK_RES_MIGRATING) { spin_unlock(&res->spinlock); mlog(0, "returning DLM_MIGRATING\n"); status = DLM_MIGRATING; goto leave; } if (res->owner != dlm->node_num) { spin_unlock(&res->spinlock); mlog(0, "returning DLM_FORWARD -- not master\n"); status = DLM_FORWARD; goto leave; } for (i=0; i<3; i++) { list_for_each_entry(lock, queue, list) { if (lock->ml.cookie == unlock->cookie && lock->ml.node == unlock->node_idx) { dlm_lock_get(lock); found = 1; break; } } if (found) break; /* scan granted -> converting -> blocked queues */ queue++; } spin_unlock(&res->spinlock); if (!found) { status = DLM_IVLOCKID; goto not_found; } /* lock was found on queue */ lksb = lock->lksb; if (flags & (LKM_VALBLK|LKM_PUT_LVB) && lock->ml.type != LKM_EXMODE) flags &= ~(LKM_VALBLK|LKM_PUT_LVB); /* unlockast only called on originating node */ if (flags & LKM_PUT_LVB) { lksb->flags |= DLM_LKSB_PUT_LVB; memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN); } /* if this is in-progress, propagate the DLM_FORWARD * all the way back out */ status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore); if (status == DLM_FORWARD) mlog(0, "lockres is in progress\n"); if (flags & LKM_PUT_LVB) lksb->flags &= ~DLM_LKSB_PUT_LVB; dlm_lockres_calc_usage(dlm, res); dlm_kick_thread(dlm, res); not_found: if (!found) mlog(ML_ERROR, "failed to find lock to unlock! 
" "cookie=%u:%llu\n", dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)), dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie))); else dlm_lock_put(lock); leave: if (res) dlm_lockres_put(res); dlm_put(dlm); return status; } static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int *actions) { enum dlm_status status; if (dlm_lock_on_list(&res->blocked, lock)) { /* cancel this outright */ status = DLM_NORMAL; *actions = (DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK); } else if (dlm_lock_on_list(&res->converting, lock)) { /* cancel the request, put back on granted */ status = DLM_NORMAL; *actions = (DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK | DLM_UNLOCK_REGRANT_LOCK | DLM_UNLOCK_CLEAR_CONVERT_TYPE); } else if (dlm_lock_on_list(&res->granted, lock)) { /* too late, already granted. */ status = DLM_CANCELGRANT; *actions = DLM_UNLOCK_CALL_AST; } else { mlog(ML_ERROR, "lock to cancel is not on any list!\n"); status = DLM_IVLOCKID; *actions = 0; } return status; } static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, struct dlm_lockstatus *lksb, int *actions) { enum dlm_status status; /* unlock request */ if (!dlm_lock_on_list(&res->granted, lock)) { status = DLM_DENIED; dlm_error(status); *actions = 0; } else { /* unlock granted lock */ status = DLM_NORMAL; *actions = (DLM_UNLOCK_FREE_LOCK | DLM_UNLOCK_CALL_AST | DLM_UNLOCK_REMOVE_LOCK); } return status; } /* there seems to be no point in doing this async * since (even for the remote case) there is really * no work to queue up... so just do it and fire the * unlockast by hand when done... 
*/

/*
 * dlmunlock - top-level entry point for unlocking (or cancelling) a lock.
 *
 * Validates the caller-supplied lksb and flags, then dispatches to the
 * master-local or remote unlock path depending on who owns the resource.
 * Retries with a short sleep while the resource is busy with
 * recovery/migration/forwarding, flushes any pending bast before firing
 * the unlockast on the master, and finally drops the references taken on
 * the lock and the resource.
 */
enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
			  int flags, dlm_astunlockfunc_t *unlockast, void *data)
{
	enum dlm_status status;
	struct dlm_lock_resource *res;
	struct dlm_lock *lock = NULL;
	int call_ast, is_master;

	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}

	/* only CANCEL / VALBLK / INVVALBLK are meaningful here */
	if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	/* VALBLK makes no sense on a cancel; drop it rather than fail */
	if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
		mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
		flags &= ~LKM_VALBLK;
	}

	if (!lksb->lockid || !lksb->lockid->lockres) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	/* take refs on both the lock and its resource for the duration */
	lock = lksb->lockid;
	BUG_ON(!lock);
	dlm_lock_get(lock);

	res = lock->lockres;
	BUG_ON(!res);
	dlm_lockres_get(res);
retry:
	call_ast = 0;
	/* need to retry up here because owner may have changed */
	mlog(0, "lock=%p res=%p\n", lock, res);

	spin_lock(&res->spinlock);
	is_master = (res->owner == dlm->node_num);
	/* the LVB is only valid for an EX holder */
	if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
		flags &= ~LKM_VALBLK;
	spin_unlock(&res->spinlock);

	if (is_master) {
		status = dlmunlock_master(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_master: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	} else {
		status = dlmunlock_remote(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_remote: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	}

	if (status == DLM_RECOVERING ||
	    status == DLM_MIGRATING ||
	    status == DLM_FORWARD ||
	    status == DLM_NOLOCKMGR) {
		/* We want to go away for a tiny bit to allow recovery
		 * / migration to complete on this resource. I don't
		 * know of any wait queue we could sleep on as this
		 * may be happening on another node. Perhaps the
		 * proper solution is to queue up requests on the
		 * other end? */

		/* do we want to yield(); ?? */
		msleep(50);

		mlog(0, "retrying unlock due to pending recovery/"
		     "migration/in-progress/reconnect\n");
		goto retry;
	}

	if (call_ast) {
		mlog(0, "calling unlockast(%p, %d)\n", data, status);
		if (is_master) {
			/* it is possible that there is one last bast
			 * pending. make sure it is flushed, then
			 * call the unlockast.
			 * not an issue if this is a mastered remotely,
			 * since this lock has been removed from the
			 * lockres queues and cannot be found. */
			dlm_kick_thread(dlm, NULL);
			wait_event(dlm->ast_wq,
				   dlm_lock_basts_flushed(dlm, lock));
		}
		(*unlockast)(data, status);
	}

	/* a cancel that lost the race with grant is still a success */
	if (status == DLM_CANCELGRANT)
		status = DLM_NORMAL;

	if (status == DLM_NORMAL) {
		mlog(0, "kicking the thread\n");
		dlm_kick_thread(dlm, res);
	} else
		dlm_error(status);

	dlm_lockres_calc_usage(dlm, res);
	dlm_lockres_put(res);
	dlm_lock_put(lock);

	mlog(0, "returning status=%d!\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(dlmunlock);
gpl-2.0
SOKP/kernel_yu_msm8916
drivers/spi/spi-fsl-espi.c
2205
18087
/*
 * Freescale eSPI controller driver.
 *
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <sysdev/fsl_soc.h>

#include "spi-fsl-lib.h"

/* eSPI Controller registers (memory-mapped, big-endian) */
struct fsl_espi_reg {
	__be32 mode;		/* 0x000 - eSPI mode register */
	__be32 event;		/* 0x004 - eSPI event register */
	__be32 mask;		/* 0x008 - eSPI mask register */
	__be32 command;		/* 0x00c - eSPI command register */
	__be32 transmit;	/* 0x010 - eSPI transmit FIFO access register*/
	__be32 receive;		/* 0x014 - eSPI receive FIFO access register*/
	u8 res[8];		/* 0x018 - 0x01c reserved */
	__be32 csmode[4];	/* 0x020 - 0x02c eSPI cs mode register */
};

/* Bookkeeping for one logical (possibly segmented) eSPI transaction */
struct fsl_espi_transfer {
	const void *tx_buf;	/* data to transmit (command + payload) */
	void *rx_buf;		/* receive buffer, NULL for write-only */
	unsigned len;		/* total transaction length in bytes */
	unsigned n_tx;		/* number of tx (command) bytes */
	unsigned n_rx;		/* number of rx bytes expected */
	unsigned actual_length;	/* bytes actually transferred so far */
	int status;		/* 0 or negative errno */
};

/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE		(1 << 31)
#define SPMODE_LOOP		(1 << 30)
#define SPMODE_TXTHR(x)		((x) << 8)
#define SPMODE_RXTHR(x)		((x) << 0)

/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH	(1 << 31)
#define CSMODE_CP_BEGIN_EDGECLK	(1 << 30)
#define CSMODE_REV		(1 << 29)
#define CSMODE_DIV16		(1 << 28)
#define CSMODE_PM(x)		((x) << 24)
#define CSMODE_POL_1		(1 << 20)
#define CSMODE_LEN(x)		((x) << 16)
#define CSMODE_BEF(x)		((x) << 12)
#define CSMODE_AFT(x)		((x) << 8)
#define CSMODE_CG(x)		((x) << 3)

/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
		| CSMODE_AFT(0) | CSMODE_CG(1))

/* SPIE register values */
#define	SPIE_NE		0x00000200	/* Not empty */
#define	SPIE_NF		0x00000100	/* Not full */

/* SPIM register values */
#define	SPIM_NE		0x00000200	/* Not empty */
#define	SPIM_NF		0x00000100	/* Not full */
#define SPIE_RXCNT(reg)     ((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg)     ((reg >> 16) & 0x3F)

/* SPCOM register values */
#define SPCOM_CS(x)		((x) << 30)
#define SPCOM_TRANLEN(x)	((x) << 0)
#define	SPCOM_TRANLEN_MAX	0xFFFF	/* Max transaction length */

/*
 * Apply the cached per-chip-select mode (cs->hw_mode) to the hardware.
 * The controller must be disabled while the CS mode register changes,
 * so the whole sequence runs with local IRQs off to keep that window
 * short.
 */
static void fsl_espi_change_mode(struct spi_device *spi)
{
	struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
	struct spi_mpc8xxx_cs *cs = spi->controller_state;
	struct fsl_espi_reg *reg_base = mspi->reg_base;
	__be32 __iomem *mode = &reg_base->csmode[spi->chip_select];
	__be32 __iomem *espi_mode = &reg_base->mode;
	u32 tmp;
	unsigned long flags;

	/* Turn off IRQs locally to minimize time that SPI is disabled. */
	local_irq_save(flags);

	/* Turn off SPI unit prior changing mode */
	tmp = mpc8xxx_spi_read_reg(espi_mode);
	mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE);
	mpc8xxx_spi_write_reg(mode, cs->hw_mode);
	mpc8xxx_spi_write_reg(espi_mode, tmp);

	local_irq_restore(flags);
}

/*
 * Fetch the next word to transmit for an LSB-first transfer: shift the
 * word into position, then byte-swap each 16-bit half before recombining.
 * Returns 0 when there is no tx buffer.
 */
static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
{
	u32 data;
	u16 data_h;
	u16 data_l;
	const u32 *tx = mpc8xxx_spi->tx;

	if (!tx)
		return 0;

	data = *tx++ << mpc8xxx_spi->tx_shift;
	data_l = data & 0xffff;
	data_h = (data >> 16) & 0xffff;
	swab16s(&data_l);
	swab16s(&data_h);
	data = data_h | data_l;

	mpc8xxx_spi->tx = tx;
	return data;
}

/*
 * Configure word size, shift/accessor callbacks, and the clock divider
 * for one transfer (or the device defaults when t is NULL).  Returns 0
 * on success or -EINVAL for an unsupported word width.
 */
static int fsl_espi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
	int bits_per_word = 0;
	u8 pm;
	u32 hz = 0;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* spi_transfer level calls that work per-word */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/* Make sure its a bit width we support [4..16] */
	if ((bits_per_word < 4) || (bits_per_word > 16))
		return -EINVAL;

	if (!hz)
		hz = spi->max_speed_hz;

	cs->rx_shift = 0;
	cs->tx_shift = 0;
	cs->get_rx = mpc8xxx_spi_rx_buf_u32;
	cs->get_tx = mpc8xxx_spi_tx_buf_u32;
	if (bits_per_word <= 8) {
		cs->rx_shift = 8 - bits_per_word;
	} else if (bits_per_word <= 16) {
		cs->rx_shift = 16 - bits_per_word;
		if (spi->mode & SPI_LSB_FIRST)
			cs->get_tx = fsl_espi_tx_buf_lsb;
	} else {
		return -EINVAL;
	}

	mpc8xxx_spi->rx_shift = cs->rx_shift;
	mpc8xxx_spi->tx_shift = cs->tx_shift;
	mpc8xxx_spi->get_rx = cs->get_rx;
	mpc8xxx_spi->get_tx = cs->get_tx;

	/* CSMODE_LEN field encodes (bits_per_word - 1) */
	bits_per_word = bits_per_word - 1;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));

	cs->hw_mode |= CSMODE_LEN(bits_per_word);

	/* pick the prescaler: use the /16 pre-divider for slow clocks */
	if ((mpc8xxx_spi->spibrg / hz) > 64) {
		cs->hw_mode |= CSMODE_DIV16;
		pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4);

		WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. "
			  "Will use %d Hz instead.\n", dev_name(&spi->dev),
			  hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1)));
		if (pm > 33)
			pm = 33;
	} else {
		pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4);
	}
	if (pm)
		pm--;
	if (pm < 2)
		pm = 2;

	cs->hw_mode |= CSMODE_PM(pm);

	fsl_espi_change_mode(spi);
	return 0;
}

/*
 * Kick off a CPU-driven (PIO) transfer: record the word count, unmask
 * the rx-not-empty interrupt, and push the first word into the FIFO.
 * The interrupt handler carries the transfer forward from here.
 */
static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t,
		unsigned int len)
{
	u32 word;
	struct fsl_espi_reg *reg_base = mspi->reg_base;

	mspi->count = len;

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);

	/* transmit word */
	word = mspi->get_tx(mspi);
	mpc8xxx_spi_write_reg(&reg_base->transmit, word);

	return 0;
}

/*
 * Run one spi_transfer to completion: program SPCOM with the chip
 * select and transaction length, start the PIO engine, then sleep until
 * the IRQ handler signals completion.  Returns the residual word count
 * (0 on full completion) or -EINVAL if the length exceeds SPCOM_TRANLEN.
 */
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
	struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
	unsigned int len = t->len;
	u8 bits_per_word;
	int ret;

	bits_per_word = spi->bits_per_word;
	if (t->bits_per_word)
		bits_per_word = t->bits_per_word;

	mpc8xxx_spi->len = t->len;
	/* the FIFO is accessed in 32-bit words */
	len = roundup(len, 4) / 4;

	mpc8xxx_spi->tx = t->tx_buf;
	mpc8xxx_spi->rx = t->rx_buf;

	INIT_COMPLETION(mpc8xxx_spi->done);

	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
	if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
		dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
				" beyond the SPCOM[TRANLEN] field\n", t->len);
		return -EINVAL;
	}
	mpc8xxx_spi_write_reg(&reg_base->command,
		(SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1)));

	ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len);
	if (ret)
		return ret;

	wait_for_completion(&mpc8xxx_spi->done);

	/* disable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, 0);

	return mpc8xxx_spi->count;
}

/* Store a 24-bit flash address into bytes 1..3 of a command buffer. */
static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd)
{
	if (cmd) {
		cmd[1] = (u8)(addr >> 16);
		cmd[2] = (u8)(addr >> 8);
		cmd[3] = (u8)(addr >> 0);
	}
}

/* Extract the 24-bit address from bytes 1..3 of a command buffer. */
static inline unsigned int fsl_espi_cmd2addr(u8 *cmd)
{
	if (cmd)
		return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0;

	return 0;
}

static void fsl_espi_do_trans(struct spi_message *m, struct
fsl_espi_transfer *tr)
{
	/*
	 * Collapse the message's transfers into one hardware transaction
	 * (all transfers must share bits_per_word and speed_hz), run it,
	 * and record the result in tr->status.
	 */
	struct spi_device *spi = m->spi;
	struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
	struct fsl_espi_transfer *espi_trans = tr;
	struct spi_message message;
	struct spi_transfer *t, *first, trans;
	int status = 0;

	spi_message_init(&message);
	memset(&trans, 0, sizeof(trans));

	/* reject messages whose transfers disagree on word size or speed */
	first = list_first_entry(&m->transfers, struct spi_transfer,
			transfer_list);
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if ((first->bits_per_word != t->bits_per_word) ||
			(first->speed_hz != t->speed_hz)) {
			espi_trans->status = -EINVAL;
			dev_err(mspi->dev, "bits_per_word/speed_hz should be"
					" same for the same SPI transfer\n");
			return;
		}

		trans.speed_hz = t->speed_hz;
		trans.bits_per_word = t->bits_per_word;
		trans.delay_usecs = max(first->delay_usecs, t->delay_usecs);
	}

	trans.len = espi_trans->len;
	trans.tx_buf = espi_trans->tx_buf;
	trans.rx_buf = espi_trans->rx_buf;
	spi_message_add_tail(&trans, &message);

	list_for_each_entry(t, &message.transfers, transfer_list) {
		if (t->bits_per_word || t->speed_hz) {
			status = -EINVAL;

			status = fsl_espi_setup_transfer(spi, t);
			if (status < 0)
				break;
		}

		if (t->len)
			status = fsl_espi_bufs(spi, t);

		/* nonzero here is a residual count from fsl_espi_bufs */
		if (status) {
			status = -EMSGSIZE;
			break;
		}

		if (t->delay_usecs)
			udelay(t->delay_usecs);
	}

	espi_trans->status = status;
	/* restore the device-default transfer settings */
	fsl_espi_setup_transfer(spi, NULL);
}

/*
 * Write-only message path: gather all tx segments into one bounce
 * buffer, run a single transaction, and report the bytes moved via
 * trans->actual_length / trans->status.
 */
static void fsl_espi_cmd_trans(struct spi_message *m,
				struct fsl_espi_transfer *trans, u8 *rx_buff)
{
	struct spi_transfer *t;
	u8 *local_buf;
	int i = 0;
	struct fsl_espi_transfer *espi_trans = trans;

	local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
	if (!local_buf) {
		espi_trans->status = -ENOMEM;
		return;
	}

	/* concatenate every tx segment of the message */
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->tx_buf) {
			memcpy(local_buf + i, t->tx_buf, t->len);
			i += t->len;
		}
	}

	espi_trans->tx_buf = local_buf;
	espi_trans->rx_buf = local_buf + espi_trans->n_tx;
	fsl_espi_do_trans(m, espi_trans);

	espi_trans->actual_length = espi_trans->len;
	kfree(local_buf);
}

/*
 * Read(-write) message path.  Large reads are split into chunks that fit
 * SPCOM_TRANLEN_MAX; for each chunk after the first, the 24-bit address
 * embedded in the command bytes is advanced by the current offset
 * (assumes the command carries a flash-style addr in bytes 1..3 — see
 * fsl_espi_cmd2addr).  Received data is copied back into the caller's
 * rx buffer chunk by chunk.
 */
static void fsl_espi_rw_trans(struct spi_message *m, struct
fsl_espi_transfer *trans, u8 *rx_buff)
{
	struct fsl_espi_transfer *espi_trans = trans;
	unsigned int n_tx = espi_trans->n_tx;
	unsigned int n_rx = espi_trans->n_rx;
	struct spi_transfer *t;
	u8 *local_buf;
	u8 *rx_buf = rx_buff;
	unsigned int trans_len;
	unsigned int addr;
	int i, pos, loop;

	local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
	if (!local_buf) {
		espi_trans->status = -ENOMEM;
		return;
	}

	for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) {
		/* chunk size limited by SPCOM_TRANLEN minus command bytes */
		trans_len = n_rx - pos;
		if (trans_len > SPCOM_TRANLEN_MAX - n_tx)
			trans_len = SPCOM_TRANLEN_MAX - n_tx;

		/* rebuild the command bytes for this chunk */
		i = 0;
		list_for_each_entry(t, &m->transfers, transfer_list) {
			if (t->tx_buf) {
				memcpy(local_buf + i, t->tx_buf, t->len);
				i += t->len;
			}
		}

		if (pos > 0) {
			addr = fsl_espi_cmd2addr(local_buf);
			addr += pos;
			fsl_espi_addr2cmd(addr, local_buf);
		}

		espi_trans->n_tx = n_tx;
		espi_trans->n_rx = trans_len;
		espi_trans->len = trans_len + n_tx;
		espi_trans->tx_buf = local_buf;
		espi_trans->rx_buf = local_buf + n_tx;
		fsl_espi_do_trans(m, espi_trans);

		memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);

		/* only the first chunk accounts for the command bytes */
		if (loop > 0)
			espi_trans->actual_length += espi_trans->len - n_tx;
		else
			espi_trans->actual_length += espi_trans->len;
	}

	kfree(local_buf);
}

/*
 * Message dispatch: total up the tx/rx byte counts, then route the
 * message to the write-only or read-write path and complete it.
 */
static void fsl_espi_do_one_msg(struct spi_message *m)
{
	struct spi_transfer *t;
	u8 *rx_buf = NULL;
	unsigned int n_tx = 0;
	unsigned int n_rx = 0;
	struct fsl_espi_transfer espi_trans;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->tx_buf)
			n_tx += t->len;
		if (t->rx_buf) {
			n_rx += t->len;
			rx_buf = t->rx_buf;
		}
	}

	espi_trans.n_tx = n_tx;
	espi_trans.n_rx = n_rx;
	espi_trans.len = n_tx + n_rx;
	espi_trans.actual_length = 0;
	espi_trans.status = 0;

	if (!rx_buf)
		fsl_espi_cmd_trans(m, &espi_trans, NULL);
	else
		fsl_espi_rw_trans(m, &espi_trans, rx_buf);

	m->actual_length = espi_trans.actual_length;
	m->status = espi_trans.status;
	m->complete(m->context);
}

/*
 * spi_master->setup hook: allocate per-device controller state on first
 * use and program the chip-select mode (clock phase/polarity, bit order)
 * and loopback from spi->mode.
 */
static int fsl_espi_setup(struct spi_device *spi)
{
	struct mpc8xxx_spi *mpc8xxx_spi;
	struct fsl_espi_reg *reg_base;
	int retval;
	u32
hw_mode;
	u32 loop_mode;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	if (!spi->max_speed_hz)
		return -EINVAL;

	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi->controller_state = cs;
	}

	mpc8xxx_spi = spi_master_get_devdata(spi->master);
	reg_base = mpc8xxx_spi->reg_base;

	hw_mode = cs->hw_mode; /* Save original settings */
	cs->hw_mode = mpc8xxx_spi_read_reg(
			&reg_base->csmode[spi->chip_select]);
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
			 | CSMODE_REV);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= CSMODE_REV;

	/* Handle the loop mode */
	loop_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
	loop_mode &= ~SPMODE_LOOP;
	if (spi->mode & SPI_LOOP)
		loop_mode |= SPMODE_LOOP;
	mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode);

	retval = fsl_espi_setup_transfer(spi, NULL);
	if (retval < 0) {
		cs->hw_mode = hw_mode; /* Restore settings */
		return retval;
	}
	return 0;
}

/*
 * PIO interrupt worker: drain one received word (byte-wise for short
 * tails), wait for the tx FIFO, acknowledge the events, and either
 * queue the next tx word or signal completion of the transfer.
 */
void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	struct fsl_espi_reg *reg_base = mspi->reg_base;

	/* We need handle RX first */
	if (events & SPIE_NE) {
		u32 rx_data, tmp;
		u8 rx_data_8;

		/* Spin until RX is done */
		while (SPIE_RXCNT(events) < min(4, mspi->len)) {
			cpu_relax();
			events = mpc8xxx_spi_read_reg(&reg_base->event);
		}

		if (mspi->len >= 4) {
			rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
		} else {
			/* fewer than 4 bytes left: pull them one at a time */
			tmp = mspi->len;
			rx_data = 0;
			while (tmp--) {
				rx_data_8 = in_8((u8 *)&reg_base->receive);
				rx_data |= (rx_data_8 << (tmp * 8));
			}

			rx_data <<= (4 - mspi->len) * 8;
		}

		mspi->len -= 4;

		if (mspi->rx)
			mspi->get_rx(rx_data, mspi);
	}

	if (!(events & SPIE_NF)) {
		int ret;

		/* spin until TX is done */
		ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg(
				&reg_base->event)) & SPIE_NF) == 0, 1000, 0);
		if (!ret) {
			dev_err(mspi->dev, "tired waiting for SPIE_NF\n");
			return;
		}
	}

	/* Clear the events */
	mpc8xxx_spi_write_reg(&reg_base->event, events);

	mspi->count -= 1;
	if (mspi->count) {
		u32 word = mspi->get_tx(mspi);

		mpc8xxx_spi_write_reg(&reg_base->transmit, word);
	} else {
		complete(&mspi->done);
	}
}

/*
 * Top-half IRQ handler: read and report the event bits, then delegate
 * the actual FIFO work to fsl_espi_cpu_irq().
 */
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
	struct mpc8xxx_spi *mspi = context_data;
	struct fsl_espi_reg *reg_base = mspi->reg_base;
	irqreturn_t ret = IRQ_NONE;
	u32 events;

	/* Get interrupt events(tx/rx) */
	events = mpc8xxx_spi_read_reg(&reg_base->event);
	if (events)
		ret = IRQ_HANDLED;

	dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events);

	fsl_espi_cpu_irq(mspi, events);

	return ret;
}

/* Undo the ioremap done in fsl_espi_probe(). */
static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
{
	iounmap(mspi->reg_base);
}

/*
 * Core probe: allocate and wire up the spi_master, map the register
 * block, install the IRQ handler, reset the controller, program the
 * per-CS defaults, enable the interface, and register the master.
 * Returns the master or an ERR_PTR on failure.
 */
static struct spi_master * fsl_espi_probe(struct device *dev,
		struct resource *mem, unsigned int irq)
{
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	struct spi_master *master;
	struct mpc8xxx_spi *mpc8xxx_spi;
	struct fsl_espi_reg *reg_base;
	u32 regval;
	int i, ret = 0;

	master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
	if (!master) {
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(dev, master);

	ret = mpc8xxx_spi_probe(dev, mem, irq);
	if (ret)
		goto err_probe;

	master->setup = fsl_espi_setup;

	mpc8xxx_spi = spi_master_get_devdata(master);
	mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg;
	mpc8xxx_spi->spi_remove = fsl_espi_remove;

	mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
	if (!mpc8xxx_spi->reg_base) {
		ret = -ENOMEM;
		goto err_probe;
	}

	reg_base = mpc8xxx_spi->reg_base;

	/* Register for SPI Interrupt */
	ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq,
			  0, "fsl_espi", mpc8xxx_spi);
	if (ret)
		goto free_irq;

	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
		mpc8xxx_spi->rx_shift = 16;
		mpc8xxx_spi->tx_shift = 24;
	}

	/* SPI controller initializations */
	mpc8xxx_spi_write_reg(&reg_base->mode, 0);
	mpc8xxx_spi_write_reg(&reg_base->mask, 0);
	mpc8xxx_spi_write_reg(&reg_base->command, 0);
	mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);

	/* Init eSPI CS mode register */
	for (i = 0; i < pdata->max_chipselect; i++)
		mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);

	/* Enable SPI interface */
	regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;

	mpc8xxx_spi_write_reg(&reg_base->mode, regval);

	ret = spi_register_master(master);
	if (ret < 0)
		goto unreg_master;

	dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq);

	return master;

unreg_master:
	free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
free_irq:
	iounmap(mpc8xxx_spi->reg_base);
err_probe:
	spi_master_put(master);
err:
	return ERR_PTR(ret);
}

/* Read the chip-select count from the device tree into pdata. */
static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	const u32 *prop;
	int len;

	prop = of_get_property(np, "fsl,espi-num-chipselects", &len);
	if (!prop || len < sizeof(*prop)) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return -EINVAL;
	}

	pdata->max_chipselect = *prop;
	pdata->cs_control = NULL;

	return 0;
}

/*
 * OF platform probe: pull the memory and IRQ resources from the device
 * tree and hand them to the core probe routine.
 */
static int of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct spi_master *master;
	struct resource mem;
	struct resource irq;
	int ret = -ENOMEM;

	ret = of_mpc8xxx_spi_probe(ofdev);
	if (ret)
		return ret;

	ret = of_fsl_espi_get_chipselects(dev);
	if (ret)
		goto err;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		goto err;

	/* of_irq_to_resource() returns 0 on failure, the irq otherwise */
	ret = of_irq_to_resource(np, 0, &irq);
	if (!ret) {
		ret = -EINVAL;
		goto err;
	}

	master = fsl_espi_probe(dev, &mem, irq.start);
	if (IS_ERR(master)) {
		ret = PTR_ERR(master);
		goto err;
	}

	return 0;

err:
	return ret;
}

static int of_fsl_espi_remove(struct platform_device *dev)
{
	return mpc8xxx_spi_remove(&dev->dev);
}

static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);

static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.owner = THIS_MODULE,
		.of_match_table = of_fsl_espi_match,
	},
	.probe		= of_fsl_espi_probe,
	.remove		= of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);

MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");
gpl-2.0
SM-G920P/G920P-MM
arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
2205
3187
/*
 * Support for MicroBlaze PVR (processor version register)
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/string.h>

#include <asm/pvr.h>
#include <asm/cpuinfo.h>

/*
 * Helper macro to map between fields in our struct cpuinfo, and
 * the PVR macros in pvr.h.
 */
#define CI(c, p) { ci->c = PVR_##p(pvr); }

/*
 * err_printk(x) reports a mismatch between the hardware PVR and the
 * device-tree description; routed through early_printk when the early
 * console is available.
 */
#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
#define err_printk(x) \
	early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
#else
#define err_printk(x) \
	pr_info("ERROR: Microblaze " x "-different for PVR and DTS\n");
#endif

/*
 * set_cpuinfo_pvr_full - populate *ci from the processor version register.
 *
 * Reads the hardware PVR and copies its fields into the cpuinfo
 * structure, logging (via err_printk) wherever the PVR disagrees with
 * the values previously taken from the device tree.  Bails out early
 * when the PVR version code reads as zero (broken PVR registers).
 * The CPU clock frequency is the one field still taken from the DTS.
 */
void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
{
	struct pvr_s pvr;
	int temp; /* for saving temp value */
	get_pvr(&pvr);

	CI(ver_code, VERSION);
	if (!ci->ver_code) {
		pr_err("ERROR: MB has broken PVR regs -> use DTS setting\n");
		return;
	}

	/* instruction-set options: barrel shifter, MSR ops, PCMP, divide */
	temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |
		PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr);
	if (ci->use_instr != temp)
		err_printk("BARREL, MSR, PCMP or DIV");
	ci->use_instr = temp;

	temp = PVR_USE_HW_MUL(pvr) | PVR_USE_MUL64(pvr);
	if (ci->use_mult != temp)
		err_printk("HW_MUL");
	ci->use_mult = temp;

	temp = PVR_USE_FPU(pvr) | PVR_USE_FPU2(pvr);
	if (ci->use_fpu != temp)
		err_printk("HW_FPU");
	ci->use_fpu = temp;

	/* which exception sources the core implements */
	ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |
			PVR_UNALIGNED_EXCEPTION(pvr) |
			PVR_ILL_OPCODE_EXCEPTION(pvr) |
			PVR_IOPB_BUS_EXCEPTION(pvr) |
			PVR_DOPB_BUS_EXCEPTION(pvr) |
			PVR_DIV_ZERO_EXCEPTION(pvr) |
			PVR_FPU_EXCEPTION(pvr) |
			PVR_FSL_EXCEPTION(pvr);

	CI(pvr_user1, USER1);
	CI(pvr_user2, USER2);

	CI(mmu, USE_MMU);
	CI(mmu_privins, MMU_PRIVINS);
	CI(endian, ENDIAN);

	/* instruction cache geometry */
	CI(use_icache, USE_ICACHE);
	CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
	CI(icache_write, ICACHE_ALLOW_WR);
	/* PVR encodes line length in words; convert to bytes */
	ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2;
	CI(icache_size, ICACHE_BYTE_SIZE);
	CI(icache_base, ICACHE_BASEADDR);
	CI(icache_high, ICACHE_HIGHADDR);

	/* data cache geometry */
	CI(use_dcache, USE_DCACHE);
	CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS);
	CI(dcache_write, DCACHE_ALLOW_WR);
	ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2;
	CI(dcache_size, DCACHE_BYTE_SIZE);
	CI(dcache_base, DCACHE_BASEADDR);
	CI(dcache_high, DCACHE_HIGHADDR);

	temp = PVR_DCACHE_USE_WRITEBACK(pvr);
	if (ci->dcache_wb != temp)
		err_printk("DCACHE WB");
	ci->dcache_wb = temp;

	/* bus interfaces */
	CI(use_dopb, D_OPB);
	CI(use_iopb, I_OPB);
	CI(use_dlmb, D_LMB);
	CI(use_ilmb, I_LMB);
	CI(num_fsl, FSL_LINKS);

	CI(irq_edge, INTERRUPT_IS_EDGE);
	CI(irq_positive, EDGE_IS_POSITIVE);

	CI(area_optimised, AREA_OPTIMISED);

	/* hardware debug resources */
	CI(hw_debug, DEBUG_ENABLED);
	CI(num_pc_brk, NUMBER_OF_PC_BRK);
	CI(num_rd_brk, NUMBER_OF_RD_ADDR_BRK);
	CI(num_wr_brk, NUMBER_OF_WR_ADDR_BRK);

	CI(fpga_family_code, TARGET_FAMILY);

	/* take timebase-frequency from DTS */
	ci->cpu_clock_freq = fcpu(cpu, "timebase-frequency");
}
gpl-2.0
ztemt/Z5_H112_kernel
drivers/mfd/omap-usb-host.c
4765
25117
/**
 * omap-usb-host.c - The USBHS core driver for OMAP EHCI & OHCI
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
 * Author: Keshava Munegowda <keshava_mgowda@ti.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2  of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <plat/cpu.h>
#include <plat/usb.h>
#include <linux/pm_runtime.h>

#define USBHS_DRIVER_NAME	"usbhs_omap"
#define OMAP_EHCI_DEVICE	"ehci-omap"
#define OMAP_OHCI_DEVICE	"ohci-omap3"

/* OMAP USBHOST Register addresses  */

/* TLL Register Set */
#define	OMAP_USBTLL_REVISION				(0x00)
#define	OMAP_USBTLL_SYSCONFIG				(0x10)
#define	OMAP_USBTLL_SYSCONFIG_CACTIVITY			(1 << 8)
#define	OMAP_USBTLL_SYSCONFIG_SIDLEMODE			(1 << 3)
#define	OMAP_USBTLL_SYSCONFIG_ENAWAKEUP			(1 << 2)
#define	OMAP_USBTLL_SYSCONFIG_SOFTRESET			(1 << 1)
#define	OMAP_USBTLL_SYSCONFIG_AUTOIDLE			(1 << 0)

#define	OMAP_USBTLL_SYSSTATUS				(0x14)
#define	OMAP_USBTLL_SYSSTATUS_RESETDONE			(1 << 0)

#define	OMAP_USBTLL_IRQSTATUS				(0x18)
#define	OMAP_USBTLL_IRQENABLE				(0x1C)

#define	OMAP_TLL_SHARED_CONF				(0x30)
#define	OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN		(1 << 6)
#define	OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN		(1 << 5)
#define	OMAP_TLL_SHARED_CONF_USB_DIVRATION		(1 << 2)
#define	OMAP_TLL_SHARED_CONF_FCLK_REQ			(1 << 1)
#define	OMAP_TLL_SHARED_CONF_FCLK_IS_ON			(1 << 0)

#define	OMAP_TLL_CHANNEL_CONF(num)			(0x040 + 0x004 * num)
#define OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT		24
#define	OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF		(1 << 11)
#define	OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE		(1 << 10)
#define	OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE		(1 << 9)
#define	OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE		(1 << 8)
#define OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS		(1 << 1)
#define	OMAP_TLL_CHANNEL_CONF_CHANEN			(1 << 0)

/* FSLSMODE field values for OMAP_TLL_CHANNEL_CONF (serial-mode wiring) */
#define OMAP_TLL_FSLSMODE_6PIN_PHY_DAT_SE0		0x0
#define OMAP_TLL_FSLSMODE_6PIN_PHY_DP_DM		0x1
#define OMAP_TLL_FSLSMODE_3PIN_PHY			0x2
#define OMAP_TLL_FSLSMODE_4PIN_PHY			0x3
#define OMAP_TLL_FSLSMODE_6PIN_TLL_DAT_SE0		0x4
#define OMAP_TLL_FSLSMODE_6PIN_TLL_DP_DM		0x5
#define OMAP_TLL_FSLSMODE_3PIN_TLL			0x6
#define OMAP_TLL_FSLSMODE_4PIN_TLL			0x7
#define OMAP_TLL_FSLSMODE_2PIN_TLL_DAT_SE0		0xA
#define OMAP_TLL_FSLSMODE_2PIN_DAT_DP_DM		0xB

/* Per-channel ULPI register offsets (byte-wide accesses) */
#define OMAP_TLL_ULPI_FUNCTION_CTRL(num)		(0x804 + 0x100 * num)
#define OMAP_TLL_ULPI_INTERFACE_CTRL(num)		(0x807 + 0x100 * num)
#define OMAP_TLL_ULPI_OTG_CTRL(num)			(0x80A + 0x100 * num)
#define OMAP_TLL_ULPI_INT_EN_RISE(num)			(0x80D + 0x100 * num)
#define OMAP_TLL_ULPI_INT_EN_FALL(num)			(0x810 + 0x100 * num)
#define OMAP_TLL_ULPI_INT_STATUS(num)			(0x813 + 0x100 * num)
#define OMAP_TLL_ULPI_INT_LATCH(num)			(0x814 + 0x100 * num)
#define OMAP_TLL_ULPI_DEBUG(num)			(0x815 + 0x100 * num)
#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num)		(0x816 + 0x100 * num)

#define OMAP_TLL_CHANNEL_COUNT				3
#define OMAP_TLL_CHANNEL_1_EN_MASK			(1 << 0)
#define OMAP_TLL_CHANNEL_2_EN_MASK			(1 << 1)
#define OMAP_TLL_CHANNEL_3_EN_MASK			(1 << 2)

/* UHH Register Set */
#define	OMAP_UHH_REVISION				(0x00)
#define	OMAP_UHH_SYSCONFIG				(0x10)
#define	OMAP_UHH_SYSCONFIG_MIDLEMODE			(1 << 12)
#define	OMAP_UHH_SYSCONFIG_CACTIVITY			(1 << 8)
#define	OMAP_UHH_SYSCONFIG_SIDLEMODE			(1 << 3)
#define	OMAP_UHH_SYSCONFIG_ENAWAKEUP			(1 << 2)
#define	OMAP_UHH_SYSCONFIG_SOFTRESET			(1 << 1)
#define	OMAP_UHH_SYSCONFIG_AUTOIDLE			(1 << 0)

#define	OMAP_UHH_SYSSTATUS				(0x14)
#define	OMAP_UHH_HOSTCONFIG				(0x40)
#define	OMAP_UHH_HOSTCONFIG_ULPI_BYPASS			(1 << 0)
#define	OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS		(1 << 0)
#define	OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS		(1 << 11)
#define	OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS		(1 << 12)
#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN		(1 << 2)
#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN		(1 << 3)
#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN		(1 << 4)
#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN		(1 << 5)
#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS		(1 << 8)
#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS		(1 << 9)
#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS		(1 << 10)
#define OMAP4_UHH_HOSTCONFIG_APP_START_CLK		(1 << 31)

/* OMAP4-specific defines */
#define OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR		(3 << 2)
#define OMAP4_UHH_SYSCONFIG_NOIDLE			(1 << 2)
#define OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR		(3 << 4)
#define OMAP4_UHH_SYSCONFIG_NOSTDBY			(1 << 4)
#define OMAP4_UHH_SYSCONFIG_SOFTRESET			(1 << 0)

#define OMAP4_P1_MODE_CLEAR				(3 << 16)
#define OMAP4_P1_MODE_TLL				(1 << 16)
#define OMAP4_P1_MODE_HSIC				(3 << 16)
#define OMAP4_P2_MODE_CLEAR				(3 << 18)
#define OMAP4_P2_MODE_TLL				(1 << 18)
#define OMAP4_P2_MODE_HSIC				(3 << 18)

#define OMAP_REV2_TLL_CHANNEL_COUNT			2

#define OMAP_UHH_DEBUG_CSR				(0x44)

/* Values of UHH_REVISION - Note: these are not given in the TRM */
#define OMAP_USBHS_REV1		0x00000010	/* OMAP3 */
#define OMAP_USBHS_REV2		0x50700100	/* OMAP4 */

#define is_omap_usbhs_rev1(x)	(x->usbhs_rev == OMAP_USBHS_REV1)
#define is_omap_usbhs_rev2(x)	(x->usbhs_rev == OMAP_USBHS_REV2)

#define is_ehci_phy_mode(x)	(x == OMAP_EHCI_PORT_MODE_PHY)
#define is_ehci_tll_mode(x)	(x == OMAP_EHCI_PORT_MODE_TLL)
#define is_ehci_hsic_mode(x)	(x == OMAP_EHCI_PORT_MODE_HSIC)

/* Driver-private state: port clocks, mapped register bases, revision */
struct usbhs_hcd_omap {
	struct clk			*xclk60mhsp1_ck;
	struct clk			*xclk60mhsp2_ck;
	struct clk			*utmi_p1_fck;
	struct clk			*usbhost_p1_fck;
	struct clk			*usbtll_p1_fck;
	struct clk			*utmi_p2_fck;
	struct clk			*usbhost_p2_fck;
	struct clk			*usbtll_p2_fck;
	struct clk			*init_60m_fclk;
	struct clk			*ehci_logic_fck;

	void __iomem			*uhh_base;
	void __iomem			*tll_base;

	struct usbhs_omap_platform_data	platdata;

	u32				usbhs_rev;
	spinlock_t			lock;
};
/*-------------------------------------------------------------------------*/

const char usbhs_driver_name[] = USBHS_DRIVER_NAME;
static u64 usbhs_dmamask = DMA_BIT_MASK(32);

/*-------------------------------------------------------------------------*/

/* 32-bit and 8-bit MMIO accessors for the UHH/TLL register blocks */
static inline void usbhs_write(void __iomem *base, u32 reg, u32 val)
{
	__raw_writel(val, base + reg);
}

static inline u32 usbhs_read(void __iomem *base, u32 reg)
{
	return __raw_readl(base + reg);
}

static inline void usbhs_writeb(void __iomem *base, u8 reg, u8 val)
{
	__raw_writeb(val, base + reg);
}

static inline u8 usbhs_readb(void __iomem *base, u8 reg)
{
	return __raw_readb(base + reg);
}

/*-------------------------------------------------------------------------*/

/*
 * Allocate and register one child platform device (the EHCI or OHCI
 * controller) with the given resources and platform data, parented to
 * the USBHS core device.  Returns the device or NULL on failure.
 */
static struct platform_device *omap_usbhs_alloc_child(const char *name,
			struct resource	*res, int num_resources, void *pdata,
			size_t pdata_size, struct device *dev)
{
	struct platform_device	*child;
	int			ret;

	child = platform_device_alloc(name, 0);

	if (!child) {
		dev_err(dev, "platform_device_alloc %s failed\n", name);
		goto err_end;
	}

	ret = platform_device_add_resources(child, res, num_resources);
	if (ret) {
		dev_err(dev, "platform_device_add_resources failed\n");
		goto err_alloc;
	}

	ret = platform_device_add_data(child, pdata, pdata_size);
	if (ret) {
		dev_err(dev, "platform_device_add_data failed\n");
		goto err_alloc;
	}

	/* children inherit the parent's 32-bit DMA capability */
	child->dev.dma_mask		= &usbhs_dmamask;
	dma_set_coherent_mask(&child->dev, DMA_BIT_MASK(32));
	child->dev.parent		= dev;

	ret = platform_device_add(child);
	if (ret) {
		dev_err(dev, "platform_device_add failed\n");
		goto err_alloc;
	}

	return child;

err_alloc:
	platform_device_put(child);

err_end:
	return NULL;
}

/*
 * Create the EHCI and OHCI child devices from the "ehci"/"ohci" memory
 * and IRQ resources of the parent USBHS platform device.  On OHCI
 * failure the already-registered EHCI child is torn down again.
 */
static int omap_usbhs_alloc_children(struct platform_device *pdev)
{
	struct device				*dev = &pdev->dev;
	struct usbhs_hcd_omap			*omap;
	struct ehci_hcd_omap_platform_data	*ehci_data;
	struct ohci_hcd_omap_platform_data	*ohci_data;
	struct platform_device			*ehci;
	struct platform_device			*ohci;
	struct resource				*res;
	struct resource				resources[2];
	int					ret;

	omap = platform_get_drvdata(pdev);
	ehci_data = omap->platdata.ehci_data;
	ohci_data = omap->platdata.ohci_data;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ehci");
	if (!res) {
		dev_err(dev, "EHCI get resource IORESOURCE_MEM failed\n");
		ret = -ENODEV;
		goto err_end;
	}
	resources[0] = *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ehci-irq");
	if (!res) {
		dev_err(dev, " EHCI get resource IORESOURCE_IRQ failed\n");
		ret = -ENODEV;
		goto err_end;
	}
	resources[1] = *res;

	ehci = omap_usbhs_alloc_child(OMAP_EHCI_DEVICE, resources, 2,
		ehci_data, sizeof(*ehci_data), dev);

	if (!ehci) {
		dev_err(dev, "omap_usbhs_alloc_child failed\n");
		ret = -ENOMEM;
		goto err_end;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ohci");
	if (!res) {
		dev_err(dev, "OHCI get resource IORESOURCE_MEM failed\n");
		ret = -ENODEV;
		goto err_ehci;
	}
	resources[0] = *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ohci-irq");
	if (!res) {
		dev_err(dev, "OHCI get resource IORESOURCE_IRQ failed\n");
		ret = -ENODEV;
		goto err_ehci;
	}
	resources[1] = *res;

	ohci = omap_usbhs_alloc_child(OMAP_OHCI_DEVICE, resources, 2,
		ohci_data, sizeof(*ohci_data), dev);
	if (!ohci) {
		dev_err(dev, "omap_usbhs_alloc_child failed\n");
		ret = -ENOMEM;
		goto err_ehci;
	}

	return 0;

err_ehci:
	platform_device_unregister(ehci);

err_end:
	return ret;
}

/* True for any of the OHCI (full/low-speed serial) port modes. */
static bool is_ohci_port(enum usbhs_omap_port_mode pmode)
{
	switch (pmode) {
	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
	case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
	case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
		return true;

	default:
		return false;
	}
}

/*
 * convert the port-mode enum to a value we can use in the FSLSMODE
 * field of USBTLL_CHANNEL_CONF
 */
static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
{
	switch (mode) {
	case OMAP_USBHS_PORT_MODE_UNUSED:
	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_6PIN_PHY_DAT_SE0;

	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
		return OMAP_TLL_FSLSMODE_6PIN_PHY_DP_DM;

	case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_3PIN_PHY;

	case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
		return OMAP_TLL_FSLSMODE_4PIN_PHY;

	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_6PIN_TLL_DAT_SE0;

	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
		return OMAP_TLL_FSLSMODE_6PIN_TLL_DP_DM;

	case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_3PIN_TLL;

	case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
		return OMAP_TLL_FSLSMODE_4PIN_TLL;

	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
		return OMAP_TLL_FSLSMODE_2PIN_TLL_DAT_SE0;

	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
		return OMAP_TLL_FSLSMODE_2PIN_DAT_DP_DM;

	default:
		pr_warning("Invalid port mode, using default\n");
		return OMAP_TLL_FSLSMODE_6PIN_PHY_DAT_SE0;
	}
}

/*
 * Program the shared TLL configuration and then set up each channel
 * according to its configured port mode.
 * NOTE: the function body continues beyond this file chunk.
 */
static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
{
	struct usbhs_hcd_omap		*omap = dev_get_drvdata(dev);
	struct usbhs_omap_platform_data	*pdata = dev->platform_data;
	unsigned			reg;
	int				i;

	/* Program Common TLL register */
	reg = usbhs_read(omap->tll_base, OMAP_TLL_SHARED_CONF);
	reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON
		| OMAP_TLL_SHARED_CONF_USB_DIVRATION);
	reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
	reg &= ~OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN;

	usbhs_write(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);

	/* Enable channels now */
	for (i = 0; i < tll_channel_count; i++) {
		reg = usbhs_read(omap->tll_base,
				OMAP_TLL_CHANNEL_CONF(i));

		if (is_ohci_port(pdata->port_mode[i])) {
			reg |= ohci_omap3_fslsmode(pdata->port_mode[i])
				<< OMAP_TLL_CHANNEL_CONF_FSLSMODE_SHIFT;
			reg |= OMAP_TLL_CHANNEL_CONF_CHANMODE_FSLS;
		} else
if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_TLL) { /* Disable AutoIdle, BitStuffing and use SDR Mode */ reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); } else continue; reg |= OMAP_TLL_CHANNEL_CONF_CHANEN; usbhs_write(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg); usbhs_writeb(omap->tll_base, OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe); } } static int usbhs_runtime_resume(struct device *dev) { struct usbhs_hcd_omap *omap = dev_get_drvdata(dev); struct usbhs_omap_platform_data *pdata = &omap->platdata; unsigned long flags; dev_dbg(dev, "usbhs_runtime_resume\n"); if (!pdata) { dev_dbg(dev, "missing platform_data\n"); return -ENODEV; } spin_lock_irqsave(&omap->lock, flags); if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck)) clk_enable(omap->ehci_logic_fck); if (is_ehci_tll_mode(pdata->port_mode[0])) { clk_enable(omap->usbhost_p1_fck); clk_enable(omap->usbtll_p1_fck); } if (is_ehci_tll_mode(pdata->port_mode[1])) { clk_enable(omap->usbhost_p2_fck); clk_enable(omap->usbtll_p2_fck); } clk_enable(omap->utmi_p1_fck); clk_enable(omap->utmi_p2_fck); spin_unlock_irqrestore(&omap->lock, flags); return 0; } static int usbhs_runtime_suspend(struct device *dev) { struct usbhs_hcd_omap *omap = dev_get_drvdata(dev); struct usbhs_omap_platform_data *pdata = &omap->platdata; unsigned long flags; dev_dbg(dev, "usbhs_runtime_suspend\n"); if (!pdata) { dev_dbg(dev, "missing platform_data\n"); return -ENODEV; } spin_lock_irqsave(&omap->lock, flags); if (is_ehci_tll_mode(pdata->port_mode[0])) { clk_disable(omap->usbhost_p1_fck); clk_disable(omap->usbtll_p1_fck); } if (is_ehci_tll_mode(pdata->port_mode[1])) { clk_disable(omap->usbhost_p2_fck); clk_disable(omap->usbtll_p2_fck); } clk_disable(omap->utmi_p2_fck); clk_disable(omap->utmi_p1_fck); if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck)) clk_disable(omap->ehci_logic_fck); spin_unlock_irqrestore(&omap->lock, flags); return 0; } static void 
omap_usbhs_init(struct device *dev) { struct usbhs_hcd_omap *omap = dev_get_drvdata(dev); struct usbhs_omap_platform_data *pdata = &omap->platdata; unsigned long flags; unsigned reg; dev_dbg(dev, "starting TI HSUSB Controller\n"); pm_runtime_get_sync(dev); spin_lock_irqsave(&omap->lock, flags); omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION); dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev); reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG); /* setup ULPI bypass and burst configurations */ reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN); reg |= OMAP4_UHH_HOSTCONFIG_APP_START_CLK; reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN; if (is_omap_usbhs_rev1(omap)) { if (pdata->port_mode[0] == OMAP_USBHS_PORT_MODE_UNUSED) reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS; if (pdata->port_mode[1] == OMAP_USBHS_PORT_MODE_UNUSED) reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS; if (pdata->port_mode[2] == OMAP_USBHS_PORT_MODE_UNUSED) reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS; /* Bypass the TLL module for PHY mode operation */ if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) { dev_dbg(dev, "OMAP3 ES version <= ES2.1\n"); if (is_ehci_phy_mode(pdata->port_mode[0]) || is_ehci_phy_mode(pdata->port_mode[1]) || is_ehci_phy_mode(pdata->port_mode[2])) reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; else reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS; } else { dev_dbg(dev, "OMAP3 ES version > ES2.1\n"); if (is_ehci_phy_mode(pdata->port_mode[0])) reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; else reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS; if (is_ehci_phy_mode(pdata->port_mode[1])) reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; else reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS; if (is_ehci_phy_mode(pdata->port_mode[2])) reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; else reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS; } } else if (is_omap_usbhs_rev2(omap)) { /* Clear port mode fields for PHY mode*/ reg &= 
~OMAP4_P1_MODE_CLEAR; reg &= ~OMAP4_P2_MODE_CLEAR; if (is_ehci_tll_mode(pdata->port_mode[0]) || (is_ohci_port(pdata->port_mode[0]))) reg |= OMAP4_P1_MODE_TLL; else if (is_ehci_hsic_mode(pdata->port_mode[0])) reg |= OMAP4_P1_MODE_HSIC; if (is_ehci_tll_mode(pdata->port_mode[1]) || (is_ohci_port(pdata->port_mode[1]))) reg |= OMAP4_P2_MODE_TLL; else if (is_ehci_hsic_mode(pdata->port_mode[1])) reg |= OMAP4_P2_MODE_HSIC; } usbhs_write(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg); dev_dbg(dev, "UHH setup done, uhh_hostconfig=%x\n", reg); if (is_ehci_tll_mode(pdata->port_mode[0]) || is_ehci_tll_mode(pdata->port_mode[1]) || is_ehci_tll_mode(pdata->port_mode[2]) || (is_ohci_port(pdata->port_mode[0])) || (is_ohci_port(pdata->port_mode[1])) || (is_ohci_port(pdata->port_mode[2]))) { /* Enable UTMI mode for required TLL channels */ if (is_omap_usbhs_rev2(omap)) usbhs_omap_tll_init(dev, OMAP_REV2_TLL_CHANNEL_COUNT); else usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT); } spin_unlock_irqrestore(&omap->lock, flags); pm_runtime_put_sync(dev); } /** * usbhs_omap_probe - initialize TI-based HCDs * * Allocates basic resources for this USB host controller. 
*/
static int __devinit usbhs_omap_probe(struct platform_device *pdev)
{
	struct device			*dev = &pdev->dev;
	struct usbhs_omap_platform_data	*pdata = dev->platform_data;
	struct usbhs_hcd_omap		*omap;
	struct resource			*res;
	int				ret = 0;
	int				i;

	if (!pdata) {
		dev_err(dev, "Missing platform data\n");
		ret = -ENOMEM;
		goto end_probe;
	}

	omap = kzalloc(sizeof(*omap), GFP_KERNEL);
	if (!omap) {
		dev_err(dev, "Memory allocation failed\n");
		ret = -ENOMEM;
		goto end_probe;
	}

	spin_lock_init(&omap->lock);

	/* keep a private copy of the platform data */
	for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
		omap->platdata.port_mode[i] = pdata->port_mode[i];

	omap->platdata.ehci_data = pdata->ehci_data;
	omap->platdata.ohci_data = pdata->ohci_data;

	pm_runtime_enable(dev);

	/*
	 * The EHCI functional clock is only needed when at least one port is
	 * configured in an EHCI mode.
	 *
	 * BUG FIX: the is_ehci_*_mode() helpers take a port-mode enum value,
	 * not a port index.  The original code passed the loop counter 'i'
	 * (0..OMAP3_HS_USB_PORTS-1), so the decision to acquire
	 * ehci_logic_fck was based on comparing an index against mode enum
	 * constants; every other caller in this file passes
	 * pdata->port_mode[x].
	 */
	for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
		if (is_ehci_phy_mode(pdata->port_mode[i]) ||
		    is_ehci_tll_mode(pdata->port_mode[i]) ||
		    is_ehci_hsic_mode(pdata->port_mode[i])) {
			omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
			if (IS_ERR(omap->ehci_logic_fck)) {
				ret = PTR_ERR(omap->ehci_logic_fck);
				dev_warn(dev, "ehci_logic_fck failed:%d\n",
					 ret);
			}
			break;
		}

	/* acquire all port/functional clocks; unwind via the goto chain */
	omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
	if (IS_ERR(omap->utmi_p1_fck)) {
		ret = PTR_ERR(omap->utmi_p1_fck);
		dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
		goto err_end;
	}

	omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
	if (IS_ERR(omap->xclk60mhsp1_ck)) {
		ret = PTR_ERR(omap->xclk60mhsp1_ck);
		dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
		goto err_utmi_p1_fck;
	}

	omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
	if (IS_ERR(omap->utmi_p2_fck)) {
		ret = PTR_ERR(omap->utmi_p2_fck);
		dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
		goto err_xclk60mhsp1_ck;
	}

	omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
	if (IS_ERR(omap->xclk60mhsp2_ck)) {
		ret = PTR_ERR(omap->xclk60mhsp2_ck);
		dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
		goto err_utmi_p2_fck;
	}

	omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
	if (IS_ERR(omap->usbhost_p1_fck)) {
		ret = PTR_ERR(omap->usbhost_p1_fck);
		dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
		goto
err_xclk60mhsp2_ck; } omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk"); if (IS_ERR(omap->usbtll_p1_fck)) { ret = PTR_ERR(omap->usbtll_p1_fck); dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret); goto err_usbhost_p1_fck; } omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk"); if (IS_ERR(omap->usbhost_p2_fck)) { ret = PTR_ERR(omap->usbhost_p2_fck); dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret); goto err_usbtll_p1_fck; } omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk"); if (IS_ERR(omap->usbtll_p2_fck)) { ret = PTR_ERR(omap->usbtll_p2_fck); dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret); goto err_usbhost_p2_fck; } omap->init_60m_fclk = clk_get(dev, "init_60m_fclk"); if (IS_ERR(omap->init_60m_fclk)) { ret = PTR_ERR(omap->init_60m_fclk); dev_err(dev, "init_60m_fclk failed error:%d\n", ret); goto err_usbtll_p2_fck; } if (is_ehci_phy_mode(pdata->port_mode[0])) { /* for OMAP3 , the clk set paretn fails */ ret = clk_set_parent(omap->utmi_p1_fck, omap->xclk60mhsp1_ck); if (ret != 0) dev_err(dev, "xclk60mhsp1_ck set parent" "failed error:%d\n", ret); } else if (is_ehci_tll_mode(pdata->port_mode[0])) { ret = clk_set_parent(omap->utmi_p1_fck, omap->init_60m_fclk); if (ret != 0) dev_err(dev, "init_60m_fclk set parent" "failed error:%d\n", ret); } if (is_ehci_phy_mode(pdata->port_mode[1])) { ret = clk_set_parent(omap->utmi_p2_fck, omap->xclk60mhsp2_ck); if (ret != 0) dev_err(dev, "xclk60mhsp2_ck set parent" "failed error:%d\n", ret); } else if (is_ehci_tll_mode(pdata->port_mode[1])) { ret = clk_set_parent(omap->utmi_p2_fck, omap->init_60m_fclk); if (ret != 0) dev_err(dev, "init_60m_fclk set parent" "failed error:%d\n", ret); } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh"); if (!res) { dev_err(dev, "UHH EHCI get resource failed\n"); ret = -ENODEV; goto err_init_60m_fclk; } omap->uhh_base = ioremap(res->start, resource_size(res)); if (!omap->uhh_base) { dev_err(dev, "UHH ioremap failed\n"); ret = -ENOMEM; goto 
err_init_60m_fclk; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll"); if (!res) { dev_err(dev, "UHH EHCI get resource failed\n"); ret = -ENODEV; goto err_tll; } omap->tll_base = ioremap(res->start, resource_size(res)); if (!omap->tll_base) { dev_err(dev, "TLL ioremap failed\n"); ret = -ENOMEM; goto err_tll; } platform_set_drvdata(pdev, omap); omap_usbhs_init(dev); ret = omap_usbhs_alloc_children(pdev); if (ret) { dev_err(dev, "omap_usbhs_alloc_children failed\n"); goto err_alloc; } goto end_probe; err_alloc: iounmap(omap->tll_base); err_tll: iounmap(omap->uhh_base); err_init_60m_fclk: clk_put(omap->init_60m_fclk); err_usbtll_p2_fck: clk_put(omap->usbtll_p2_fck); err_usbhost_p2_fck: clk_put(omap->usbhost_p2_fck); err_usbtll_p1_fck: clk_put(omap->usbtll_p1_fck); err_usbhost_p1_fck: clk_put(omap->usbhost_p1_fck); err_xclk60mhsp2_ck: clk_put(omap->xclk60mhsp2_ck); err_utmi_p2_fck: clk_put(omap->utmi_p2_fck); err_xclk60mhsp1_ck: clk_put(omap->xclk60mhsp1_ck); err_utmi_p1_fck: clk_put(omap->utmi_p1_fck); err_end: clk_put(omap->ehci_logic_fck); pm_runtime_disable(dev); kfree(omap); end_probe: return ret; } /** * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs * @pdev: USB Host Controller being removed * * Reverses the effect of usbhs_omap_probe(). 
*/
static int __devexit usbhs_omap_remove(struct platform_device *pdev)
{
	struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);

	/* unmap the TLL and UHH register windows */
	iounmap(omap->tll_base);
	iounmap(omap->uhh_base);

	/* drop every clock reference taken during probe (reverse order) */
	clk_put(omap->init_60m_fclk);
	clk_put(omap->usbtll_p2_fck);
	clk_put(omap->usbhost_p2_fck);
	clk_put(omap->usbtll_p1_fck);
	clk_put(omap->usbhost_p1_fck);
	clk_put(omap->xclk60mhsp2_ck);
	clk_put(omap->utmi_p2_fck);
	clk_put(omap->xclk60mhsp1_ck);
	clk_put(omap->utmi_p1_fck);
	clk_put(omap->ehci_logic_fck);

	pm_runtime_disable(&pdev->dev);
	kfree(omap);

	return 0;
}

/* runtime PM callbacks only; no system sleep handlers are provided */
static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
	.runtime_suspend	= usbhs_runtime_suspend,
	.runtime_resume		= usbhs_runtime_resume,
};

/*
 * No .probe here: the driver is registered through
 * platform_driver_probe() in omap_usbhs_drvinit() below.
 */
static struct platform_driver usbhs_omap_driver = {
	.driver = {
		.name		= (char *)usbhs_driver_name,
		.owner		= THIS_MODULE,
		.pm		= &usbhsomap_dev_pm_ops,
	},
	.remove		= __exit_p(usbhs_omap_remove),
};

MODULE_AUTHOR("Keshava Munegowda <keshava_mgowda@ti.com>");
MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");

static int __init omap_usbhs_drvinit(void)
{
	return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
}

/*
 * init before ehci and ohci drivers;
 * The usbhs core driver should be initialized much before
 * the omap ehci and ohci probe functions are called.
 */
fs_initcall(omap_usbhs_drvinit);

static void __exit omap_usbhs_drvexit(void)
{
	platform_driver_unregister(&usbhs_omap_driver);
}
module_exit(omap_usbhs_drvexit);
gpl-2.0
jstotero/Cucciolone-Rewrite
drivers/isdn/hysdn/boardergo.c
5021
16200
/* $Id: boardergo.c,v 1.5.6.7 2001/11/06 21:58:19 kai Exp $ * * Linux driver for HYSDN cards, specific routines for ergo type boards. * * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH * Copyright 1999 by Werner Cornelius (werner@titro.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * As all Linux supported cards Champ2, Ergo and Metro2/4 use the same * DPRAM interface and layout with only minor differences all related * stuff is done here, not in separate modules. * */ #include <linux/signal.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <asm/io.h> #include "hysdn_defs.h" #include "boardergo.h" #define byteout(addr,val) outb(val,addr) #define bytein(addr) inb(addr) /***************************************************/ /* The cards interrupt handler. Called from system */ /***************************************************/ static irqreturn_t ergo_interrupt(int intno, void *dev_id) { hysdn_card *card = dev_id; /* parameter from irq */ tErgDpram *dpr; unsigned long flags; unsigned char volatile b; if (!card) return IRQ_NONE; /* error -> spurious interrupt */ if (!card->irq_enabled) return IRQ_NONE; /* other device interrupting or irq switched off */ spin_lock_irqsave(&card->hysdn_lock, flags); /* no further irqs allowed */ if (!(bytein(card->iobase + PCI9050_INTR_REG) & PCI9050_INTR_REG_STAT1)) { spin_unlock_irqrestore(&card->hysdn_lock, flags); /* restore old state */ return IRQ_NONE; /* no interrupt requested by E1 */ } /* clear any pending ints on the board */ dpr = card->dpram; b = dpr->ToPcInt; /* clear for ergo */ b |= dpr->ToPcIntMetro; /* same for metro */ b |= dpr->ToHyInt; /* and for champ */ /* start kernel task immediately after leaving all interrupts */ if (!card->hw_lock) schedule_work(&card->irq_queue); spin_unlock_irqrestore(&card->hysdn_lock, 
flags); return IRQ_HANDLED; } /* ergo_interrupt */ /******************************************************************************/ /* ergo_irq_bh will be called as part of the kernel clearing its shared work */ /* queue sometime after a call to schedule_work has been made passing our */ /* work_struct. This task is the only one handling data transfer from or to */ /* the card after booting. The task may be queued from everywhere */ /* (interrupts included). */ /******************************************************************************/ static void ergo_irq_bh(struct work_struct *ugli_api) { hysdn_card * card = container_of(ugli_api, hysdn_card, irq_queue); tErgDpram *dpr; int again; unsigned long flags; if (card->state != CARD_STATE_RUN) return; /* invalid call */ dpr = card->dpram; /* point to DPRAM */ spin_lock_irqsave(&card->hysdn_lock, flags); if (card->hw_lock) { spin_unlock_irqrestore(&card->hysdn_lock, flags); /* hardware currently unavailable */ return; } card->hw_lock = 1; /* we now lock the hardware */ do { again = 0; /* assume loop not to be repeated */ if (!dpr->ToHyFlag) { /* we are able to send a buffer */ if (hysdn_sched_tx(card, dpr->ToHyBuf, &dpr->ToHySize, &dpr->ToHyChannel, ERG_TO_HY_BUF_SIZE)) { dpr->ToHyFlag = 1; /* enable tx */ again = 1; /* restart loop */ } } /* we are able to send a buffer */ if (dpr->ToPcFlag) { /* a message has arrived for us, handle it */ if (hysdn_sched_rx(card, dpr->ToPcBuf, dpr->ToPcSize, dpr->ToPcChannel)) { dpr->ToPcFlag = 0; /* we worked the data */ again = 1; /* restart loop */ } } /* a message has arrived for us */ if (again) { dpr->ToHyInt = 1; dpr->ToPcInt = 1; /* interrupt to E1 for all cards */ } else card->hw_lock = 0; /* free hardware again */ } while (again); /* until nothing more to do */ spin_unlock_irqrestore(&card->hysdn_lock, flags); } /* ergo_irq_bh */ /*********************************************************/ /* stop the card (hardware reset) and disable interrupts */ 
/*********************************************************/
/* Tears down networking/CAPI first, then masks the PCI irq and holds */
/* the E1 processor in reset.  Safe to call repeatedly.               */
static void
ergo_stopcard(hysdn_card * card)
{
	unsigned long flags;
	unsigned char val;

	hysdn_net_release(card);	/* first release the net device if existing */
#ifdef CONFIG_HYSDN_CAPI
	hycapi_capi_stop(card);
#endif /* CONFIG_HYSDN_CAPI */
	spin_lock_irqsave(&card->hysdn_lock, flags);
	val = bytein(card->iobase + PCI9050_INTR_REG);	/* get actual value */
	val &= ~(PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1);	/* mask irq */
	byteout(card->iobase + PCI9050_INTR_REG, val);
	card->irq_enabled = 0;
	byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RESET);	/* reset E1 processor */
	card->state = CARD_STATE_UNUSED;
	card->err_log_state = ERRLOG_STATE_OFF;	/* currently no log active */
	spin_unlock_irqrestore(&card->hysdn_lock, flags);
}				/* ergo_stopcard */

/**************************************************************************/
/* enable or disable the cards error log. The event is queued if possible */
/**************************************************************************/
/* Only records the request (ERRLOG_STATE_START/STOP) under the lock; the */
/* actual switch is performed by the queued ergo_irq_bh() worker.         */
static void
ergo_set_errlog_state(hysdn_card * card, int on)
{
	unsigned long flags;

	if (card->state != CARD_STATE_RUN) {
		card->err_log_state = ERRLOG_STATE_OFF;	/* must be off */
		return;
	}
	spin_lock_irqsave(&card->hysdn_lock, flags);

	if (((card->err_log_state == ERRLOG_STATE_OFF) && !on) ||
	    ((card->err_log_state == ERRLOG_STATE_ON) && on)) {
		spin_unlock_irqrestore(&card->hysdn_lock, flags);
		return;	/* nothing to do */
	}
	if (on)
		card->err_log_state = ERRLOG_STATE_START;	/* request start */
	else
		card->err_log_state = ERRLOG_STATE_STOP;	/* request stop */

	spin_unlock_irqrestore(&card->hysdn_lock, flags);
	schedule_work(&card->irq_queue);
}				/* ergo_set_errlog_state */

/******************************************/
/* test the cards RAM and return 0 if ok.
*/ /******************************************/ static const char TestText[36] = "This Message is filler, why read it"; static int ergo_testram(hysdn_card * card) { tErgDpram *dpr = card->dpram; memset(dpr->TrapTable, 0, sizeof(dpr->TrapTable)); /* clear all Traps */ dpr->ToHyInt = 1; /* E1 INTR state forced */ memcpy(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText, sizeof(TestText)); if (memcmp(&dpr->ToHyBuf[ERG_TO_HY_BUF_SIZE - sizeof(TestText)], TestText, sizeof(TestText))) return (-1); memcpy(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText, sizeof(TestText)); if (memcmp(&dpr->ToPcBuf[ERG_TO_PC_BUF_SIZE - sizeof(TestText)], TestText, sizeof(TestText))) return (-1); return (0); } /* ergo_testram */ /*****************************************************************************/ /* this function is intended to write stage 1 boot image to the cards buffer */ /* this is done in two steps. First the 1024 hi-words are written (offs=0), */ /* then the 1024 lo-bytes are written. The remaining DPRAM is cleared, the */ /* PCI-write-buffers flushed and the card is taken out of reset. */ /* The function then waits for a reaction of the E1 processor or a timeout. */ /* Negative return values are interpreted as errors. */ /*****************************************************************************/ static int ergo_writebootimg(struct HYSDN_CARD *card, unsigned char *buf, unsigned long offs) { unsigned char *dst; tErgDpram *dpram; int cnt = (BOOT_IMG_SIZE >> 2); /* number of words to move and swap (byte order!) 
*/ if (card->debug_flags & LOG_POF_CARD) hysdn_addlog(card, "ERGO: write bootldr offs=0x%lx ", offs); dst = card->dpram; /* pointer to start of DPRAM */ dst += (offs + ERG_DPRAM_FILL_SIZE); /* offset in the DPRAM */ while (cnt--) { *dst++ = *(buf + 1); /* high byte */ *dst++ = *buf; /* low byte */ dst += 2; /* point to next longword */ buf += 2; /* buffer only filled with words */ } /* if low words (offs = 2) have been written, clear the rest of the DPRAM, */ /* flush the PCI-write-buffer and take the E1 out of reset */ if (offs) { memset(card->dpram, 0, ERG_DPRAM_FILL_SIZE); /* fill the DPRAM still not cleared */ dpram = card->dpram; /* get pointer to dpram structure */ dpram->ToHyNoDpramErrLog = 0xFF; /* write a dpram register */ while (!dpram->ToHyNoDpramErrLog); /* reread volatile register to flush PCI */ byteout(card->iobase + PCI9050_USER_IO, PCI9050_E1_RUN); /* start E1 processor */ /* the interrupts are still masked */ msleep_interruptible(20); /* Timeout 20ms */ if (((tDpramBootSpooler *) card->dpram)->Len != DPRAM_SPOOLER_DATA_SIZE) { if (card->debug_flags & LOG_POF_CARD) hysdn_addlog(card, "ERGO: write bootldr no answer"); return (-ERR_BOOTIMG_FAIL); } } /* start_boot_img */ return (0); /* successful */ } /* ergo_writebootimg */ /********************************************************************************/ /* ergo_writebootseq writes the buffer containing len bytes to the E1 processor */ /* using the boot spool mechanism. If everything works fine 0 is returned. In */ /* case of errors a negative error value is returned. 
*/
/********************************************************************************/
static int
ergo_writebootseq(struct HYSDN_CARD *card, unsigned char *buf, int len)
{
	tDpramBootSpooler *sp = (tDpramBootSpooler *) card->dpram;
	unsigned char *dst;
	unsigned char buflen;
	int nr_write;
	unsigned char tmp_rdptr;
	unsigned char wr_mirror;
	int i;

	if (card->debug_flags & LOG_POF_CARD)
		hysdn_addlog(card, "ERGO: write boot seq len=%d ", len);

	dst = sp->Data;		/* point to data in spool structure */
	buflen = sp->Len;	/* maximum len of spooled data */
	wr_mirror = sp->WrPtr;	/* only once read; mirrored locally thereafter */

	/* try until all bytes written or error */
	i = 0x1000;		/* timeout value */
	while (len) {
		/* first determine the number of bytes that may be buffered;
		 * RdPtr is read twice until two consecutive reads match, to
		 * get a value that is stable against concurrent E1 updates */
		do {
			tmp_rdptr = sp->RdPtr;	/* first read the pointer */
			i--;	/* decrement timeout */
		} while (i && (tmp_rdptr != sp->RdPtr));	/* wait for stable pointer */

		if (!i) {
			if (card->debug_flags & LOG_POF_CARD)
				hysdn_addlog(card, "ERGO: write boot seq timeout");
			return (-ERR_BOOTSEQ_FAIL);	/* value not stable -> timeout */
		}
		/* free space in the ring buffer, keeping one slot open so
		 * that WrPtr == RdPtr always means "empty" */
		if ((nr_write = tmp_rdptr - wr_mirror - 1) < 0)
			nr_write += buflen;	/* now we got number of free bytes - 1 in buffer */

		if (!nr_write)
			continue;	/* no free bytes in buffer */

		if (nr_write > len)
			nr_write = len;	/* limit if last few bytes */
		i = 0x1000;	/* reset timeout value */

		/* now we know how many bytes we may put in the buffer */
		len -= nr_write;	/* we safely could adjust len before output */
		while (nr_write--) {
			*(dst + wr_mirror) = *buf++;	/* output one byte */
			if (++wr_mirror >= buflen)
				wr_mirror = 0;
			sp->WrPtr = wr_mirror;	/* announce the next byte to E1 */
		}		/* while (nr_write) */
	}			/* while (len) */
	return (0);
}				/* ergo_writebootseq */

/***********************************************************************************/
/* ergo_waitpofready waits for a maximum of 10 seconds for the completion of the
If the process has been successful 0 is returned otherwise a */ /* negative error code is returned. */ /***********************************************************************************/ static int ergo_waitpofready(struct HYSDN_CARD *card) { tErgDpram *dpr = card->dpram; /* pointer to DPRAM structure */ int timecnt = 10000 / 50; /* timeout is 10 secs max. */ unsigned long flags; int msg_size; int i; if (card->debug_flags & LOG_POF_CARD) hysdn_addlog(card, "ERGO: waiting for pof ready"); while (timecnt--) { /* wait until timeout */ if (dpr->ToPcFlag) { /* data has arrived */ if ((dpr->ToPcChannel != CHAN_SYSTEM) || (dpr->ToPcSize < MIN_RDY_MSG_SIZE) || (dpr->ToPcSize > MAX_RDY_MSG_SIZE) || ((*(unsigned long *) dpr->ToPcBuf) != RDY_MAGIC)) break; /* an error occurred */ /* Check for additional data delivered during SysReady */ msg_size = dpr->ToPcSize - RDY_MAGIC_SIZE; if (msg_size > 0) if (EvalSysrTokData(card, dpr->ToPcBuf + RDY_MAGIC_SIZE, msg_size)) break; if (card->debug_flags & LOG_POF_RECORD) hysdn_addlog(card, "ERGO: pof boot success"); spin_lock_irqsave(&card->hysdn_lock, flags); card->state = CARD_STATE_RUN; /* now card is running */ /* enable the cards interrupt */ byteout(card->iobase + PCI9050_INTR_REG, bytein(card->iobase + PCI9050_INTR_REG) | (PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1)); card->irq_enabled = 1; /* we are ready to receive interrupts */ dpr->ToPcFlag = 0; /* reset data indicator */ dpr->ToHyInt = 1; dpr->ToPcInt = 1; /* interrupt to E1 for all cards */ spin_unlock_irqrestore(&card->hysdn_lock, flags); if ((hynet_enable & (1 << card->myid)) && (i = hysdn_net_create(card))) { ergo_stopcard(card); card->state = CARD_STATE_BOOTERR; return (i); } #ifdef CONFIG_HYSDN_CAPI if((i = hycapi_capi_create(card))) { printk(KERN_WARNING "HYSDN: failed to create capi-interface.\n"); } #endif /* CONFIG_HYSDN_CAPI */ return (0); /* success */ } /* data has arrived */ msleep_interruptible(50); /* Timeout 50ms */ } /* wait until timeout */ if 
(card->debug_flags & LOG_POF_CARD) hysdn_addlog(card, "ERGO: pof boot ready timeout"); return (-ERR_POF_TIMEOUT); } /* ergo_waitpofready */ /************************************************************************************/ /* release the cards hardware. Before releasing do a interrupt disable and hardware */ /* reset. Also unmap dpram. */ /* Use only during module release. */ /************************************************************************************/ static void ergo_releasehardware(hysdn_card * card) { ergo_stopcard(card); /* first stop the card if not already done */ free_irq(card->irq, card); /* release interrupt */ release_region(card->iobase + PCI9050_INTR_REG, 1); /* release all io ports */ release_region(card->iobase + PCI9050_USER_IO, 1); iounmap(card->dpram); card->dpram = NULL; /* release shared mem */ } /* ergo_releasehardware */ /*********************************************************************************/ /* acquire the needed hardware ports and map dpram. If an error occurs a nonzero */ /* value is returned. */ /* Use only during module init. 
*/
/*********************************************************************************/
int
ergo_inithardware(hysdn_card * card)
{
	/* claim the two PLX9050 I/O ports used for irq masking and E1 reset */
	if (!request_region(card->iobase + PCI9050_INTR_REG, 1, "HYSDN"))
		return (-1);
	if (!request_region(card->iobase + PCI9050_USER_IO, 1, "HYSDN")) {
		release_region(card->iobase + PCI9050_INTR_REG, 1);
		return (-1);	/* ports already in use */
	}
	card->memend = card->membase + ERG_DPRAM_PAGE_SIZE - 1;
	if (!(card->dpram = ioremap(card->membase, ERG_DPRAM_PAGE_SIZE))) {
		release_region(card->iobase + PCI9050_INTR_REG, 1);
		release_region(card->iobase + PCI9050_USER_IO, 1);
		return (-1);
	}

	ergo_stopcard(card);	/* disable interrupts */
	/* irq is shared; ergo_interrupt() rejects spurious calls itself */
	if (request_irq(card->irq, ergo_interrupt, IRQF_SHARED, "HYSDN", card)) {
		ergo_releasehardware(card);	/* return the acquired hardware */
		return (-1);
	}
	/* success, now setup the function pointers */
	card->stopcard = ergo_stopcard;
	card->releasehardware = ergo_releasehardware;
	card->testram = ergo_testram;
	card->writebootimg = ergo_writebootimg;
	card->writebootseq = ergo_writebootseq;
	card->waitpofready = ergo_waitpofready;
	card->set_errlog_state = ergo_set_errlog_state;
	INIT_WORK(&card->irq_queue, ergo_irq_bh);
	spin_lock_init(&card->hysdn_lock);
	return (0);
}				/* ergo_inithardware */
gpl-2.0
DirtyUnicorns/android_kernel_lge_mako
drivers/usb/gadget/ether.c
5021
12245
/* * ether.c -- Ethernet gadget driver, with CDC and non-CDC options * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger * Copyright (C) 2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* #define VERBOSE_DEBUG */ #include <linux/kernel.h> #include <linux/utsname.h> #if defined USB_ETH_RNDIS # undef USB_ETH_RNDIS #endif #ifdef CONFIG_USB_ETH_RNDIS # define USB_ETH_RNDIS y #endif #include "u_ether.h" /* * Ethernet gadget driver -- with CDC and non-CDC options * Builds on hardware support for a full duplex link. * * CDC Ethernet is the standard USB solution for sending Ethernet frames * using USB. Real hardware tends to use the same framing protocol but look * different for control features. This driver strongly prefers to use * this USB-IF standard as its open-systems interoperability solution; * most host side USB stacks (except from Microsoft) support it. * * This is sometimes called "CDC ECM" (Ethernet Control Model) to support * TLA-soup. "CDC ACM" (Abstract Control Model) is for modems, and a new * "CDC EEM" (Ethernet Emulation Model) is starting to spread. * * There's some hardware that can't talk CDC ECM. We make that hardware * implement a "minimalist" vendor-agnostic CDC core: same framing, but * link-level setup only requires activating the configuration. Only the * endpoint descriptors, and product/vendor IDs, are relevant; no control * operations are available. Linux supports it, but other host operating * systems may not. (This is a subset of CDC Ethernet.) * * It turns out that if you add a few descriptors to that "CDC Subset", * (Windows) host side drivers from MCCI can treat it as one submode of * a proprietary scheme called "SAFE" ... 
without needing to know about * specific product/vendor IDs. So we do that, making it easier to use * those MS-Windows drivers. Those added descriptors make it resemble a * CDC MDLM device, but they don't change device behavior at all. (See * MCCI Engineering report 950198 "SAFE Networking Functions".) * * A third option is also in use. Rather than CDC Ethernet, or something * simpler, Microsoft pushes their own approach: RNDIS. The published * RNDIS specs are ambiguous and appear to be incomplete, and are also * needlessly complex. They borrow more from CDC ACM than CDC ECM. */ #define DRIVER_DESC "Ethernet Gadget" #define DRIVER_VERSION "Memorial Day 2008" #ifdef USB_ETH_RNDIS #define PREFIX "RNDIS/" #else #define PREFIX "" #endif /* * This driver aims for interoperability by using CDC ECM unless * * can_support_ecm() * * returns false, in which case it supports the CDC Subset. By default, * that returns true; most hardware has no problems with CDC ECM, that's * a good default. Previous versions of this driver had no default; this * version changes that, removing overhead for new controller support. * * IF YOUR HARDWARE CAN'T SUPPORT CDC ECM, UPDATE THAT ROUTINE! */ static inline bool has_rndis(void) { #ifdef USB_ETH_RNDIS return true; #else return false; #endif } /*-------------------------------------------------------------------------*/ /* * Kbuild is not very cooperative with respect to linking separately * compiled library objects into one module. So for now we won't use * separate compilation ... ensuring init/exit sections work to shrink * the runtime footprint, and giving us at least some parts of what * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
*/ #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "f_ecm.c" #include "f_subset.c" #ifdef USB_ETH_RNDIS #include "f_rndis.c" #include "rndis.c" #endif #include "f_eem.c" #include "u_ether.c" /*-------------------------------------------------------------------------*/ /* DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ /* Thanks to NetChip Technologies for donating this product ID. * It's for devices with only CDC Ethernet configurations. */ #define CDC_VENDOR_NUM 0x0525 /* NetChip */ #define CDC_PRODUCT_NUM 0xa4a1 /* Linux-USB Ethernet Gadget */ /* For hardware that can't talk CDC, we use the same vendor ID that * ARM Linux has used for ethernet-over-usb, both with sa1100 and * with pxa250. We're protocol-compatible, if the host-side drivers * use the endpoint descriptors. bcdDevice (version) is nonzero, so * drivers that need to hard-wire endpoint numbers have a hook. * * The protocol is a minimal subset of CDC Ether, which works on any bulk * hardware that's not deeply broken ... even on hardware that can't talk * RNDIS (like SA-1100, with no interrupt endpoint, or anything that * doesn't handle control-OUT). */ #define SIMPLE_VENDOR_NUM 0x049f #define SIMPLE_PRODUCT_NUM 0x505a /* For hardware that can talk RNDIS and either of the above protocols, * use this ID ... the windows INF files will know it. Unless it's * used with CDC Ethernet, Linux 2.4 hosts will need updates to choose * the non-RNDIS configuration. 
*/ #define RNDIS_VENDOR_NUM 0x0525 /* NetChip */ #define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */ /* For EEM gadgets */ #define EEM_VENDOR_NUM 0x1d6b /* Linux Foundation */ #define EEM_PRODUCT_NUM 0x0102 /* EEM Gadget */ /*-------------------------------------------------------------------------*/ static struct usb_device_descriptor device_desc = { .bLength = sizeof device_desc, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = cpu_to_le16 (0x0200), .bDeviceClass = USB_CLASS_COMM, .bDeviceSubClass = 0, .bDeviceProtocol = 0, /* .bMaxPacketSize0 = f(hardware) */ /* Vendor and product id defaults change according to what configs * we support. (As does bNumConfigurations.) These values can * also be overridden by module parameters. */ .idVendor = cpu_to_le16 (CDC_VENDOR_NUM), .idProduct = cpu_to_le16 (CDC_PRODUCT_NUM), /* .bcdDevice = f(hardware) */ /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ /* NO SERIAL NUMBER */ .bNumConfigurations = 1, }; static struct usb_otg_descriptor otg_descriptor = { .bLength = sizeof otg_descriptor, .bDescriptorType = USB_DT_OTG, /* REVISIT SRP-only hardware is possible, although * it would not be called "OTG" ... 
*/ .bmAttributes = USB_OTG_SRP | USB_OTG_HNP, }; static const struct usb_descriptor_header *otg_desc[] = { (struct usb_descriptor_header *) &otg_descriptor, NULL, }; /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 static char manufacturer[50]; static struct usb_string strings_dev[] = { [STRING_MANUFACTURER_IDX].s = manufacturer, [STRING_PRODUCT_IDX].s = PREFIX DRIVER_DESC, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; static u8 hostaddr[ETH_ALEN]; /*-------------------------------------------------------------------------*/ /* * We may not have an RNDIS configuration, but if we do it needs to be * the first one present. That's to make Microsoft's drivers happy, * and to follow DOCSIS 1.0 (cable modem standard). */ static int __init rndis_do_config(struct usb_configuration *c) { /* FIXME alloc iConfiguration string, set it in c->strings */ if (gadget_is_otg(c->cdev->gadget)) { c->descriptors = otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } return rndis_bind_config(c, hostaddr); } static struct usb_configuration rndis_config_driver = { .label = "RNDIS", .bConfigurationValue = 2, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; /*-------------------------------------------------------------------------*/ #ifdef CONFIG_USB_ETH_EEM static bool use_eem = 1; #else static bool use_eem; #endif module_param(use_eem, bool, 0); MODULE_PARM_DESC(use_eem, "use CDC EEM mode"); /* * We _always_ have an ECM, CDC Subset, or EEM configuration. 
*/ static int __init eth_do_config(struct usb_configuration *c) { /* FIXME alloc iConfiguration string, set it in c->strings */ if (gadget_is_otg(c->cdev->gadget)) { c->descriptors = otg_desc; c->bmAttributes |= USB_CONFIG_ATT_WAKEUP; } if (use_eem) return eem_bind_config(c); else if (can_support_ecm(c->cdev->gadget)) return ecm_bind_config(c, hostaddr); else return geth_bind_config(c, hostaddr); } static struct usb_configuration eth_config_driver = { /* .label = f(hardware) */ .bConfigurationValue = 1, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_SELFPOWER, }; /*-------------------------------------------------------------------------*/ static int __init eth_bind(struct usb_composite_dev *cdev) { int gcnum; struct usb_gadget *gadget = cdev->gadget; int status; /* set up network link layer */ status = gether_setup(cdev->gadget, hostaddr); if (status < 0) return status; /* set up main config label and device descriptor */ if (use_eem) { /* EEM */ eth_config_driver.label = "CDC Ethernet (EEM)"; device_desc.idVendor = cpu_to_le16(EEM_VENDOR_NUM); device_desc.idProduct = cpu_to_le16(EEM_PRODUCT_NUM); } else if (can_support_ecm(cdev->gadget)) { /* ECM */ eth_config_driver.label = "CDC Ethernet (ECM)"; } else { /* CDC Subset */ eth_config_driver.label = "CDC Subset/SAFE"; device_desc.idVendor = cpu_to_le16(SIMPLE_VENDOR_NUM); device_desc.idProduct = cpu_to_le16(SIMPLE_PRODUCT_NUM); if (!has_rndis()) device_desc.bDeviceClass = USB_CLASS_VENDOR_SPEC; } if (has_rndis()) { /* RNDIS plus ECM-or-Subset */ device_desc.idVendor = cpu_to_le16(RNDIS_VENDOR_NUM); device_desc.idProduct = cpu_to_le16(RNDIS_PRODUCT_NUM); device_desc.bNumConfigurations = 2; } gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum); else { /* We assume that can_support_ecm() tells the truth; * but if the controller isn't recognized at all then * that assumption is a bit more likely to be wrong. 
*/ dev_warn(&gadget->dev, "controller '%s' not recognized; trying %s\n", gadget->name, eth_config_driver.label); device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099); } /* Allocate string descriptor numbers ... note that string * contents can be overridden by the composite_dev glue. */ /* device descriptor strings: manufacturer, product */ snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_MANUFACTURER_IDX].id = status; device_desc.iManufacturer = status; status = usb_string_id(cdev); if (status < 0) goto fail; strings_dev[STRING_PRODUCT_IDX].id = status; device_desc.iProduct = status; /* register our configuration(s); RNDIS first, if it's used */ if (has_rndis()) { status = usb_add_config(cdev, &rndis_config_driver, rndis_do_config); if (status < 0) goto fail; } status = usb_add_config(cdev, &eth_config_driver, eth_do_config); if (status < 0) goto fail; dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n", DRIVER_DESC); return 0; fail: gether_cleanup(); return status; } static int __exit eth_unbind(struct usb_composite_dev *cdev) { gether_cleanup(); return 0; } static struct usb_composite_driver eth_driver = { .name = "g_ether", .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_SUPER, .unbind = __exit_p(eth_unbind), }; MODULE_DESCRIPTION(PREFIX DRIVER_DESC); MODULE_AUTHOR("David Brownell, Benedikt Spanger"); MODULE_LICENSE("GPL"); static int __init init(void) { return usb_composite_probe(&eth_driver, eth_bind); } module_init(init); static void __exit cleanup(void) { usb_composite_unregister(&eth_driver); } module_exit(cleanup);
gpl-2.0
TheNotOnly/linux-3.5
net/caif/cfserl.c
5277
4456
/* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cfserl.h> #define container_obj(layr) ((struct cfserl *) layr) #define CFSERL_STX 0x02 #define SERIAL_MINIUM_PACKET_SIZE 4 #define SERIAL_MAX_FRAMESIZE 4096 struct cfserl { struct cflayer layer; struct cfpkt *incomplete_frm; /* Protects parallel processing of incoming packets */ spinlock_t sync; bool usestx; }; static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid); struct cflayer *cfserl_create(int instance, bool use_stx) { struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC); if (!this) return NULL; caif_assert(offsetof(struct cfserl, layer) == 0); this->layer.receive = cfserl_receive; this->layer.transmit = cfserl_transmit; this->layer.ctrlcmd = cfserl_ctrlcmd; this->usestx = use_stx; spin_lock_init(&this->sync); snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1"); return &this->layer; } static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) { struct cfserl *layr = container_obj(l); u16 pkt_len; struct cfpkt *pkt = NULL; struct cfpkt *tail_pkt = NULL; u8 tmp8; u16 tmp; u8 stx = CFSERL_STX; int ret; u16 expectlen = 0; caif_assert(newpkt != NULL); spin_lock(&layr->sync); if (layr->incomplete_frm != NULL) { layr->incomplete_frm = cfpkt_append(layr->incomplete_frm, newpkt, expectlen); pkt = layr->incomplete_frm; if (pkt == NULL) { spin_unlock(&layr->sync); return -ENOMEM; } } else { pkt = newpkt; } layr->incomplete_frm = NULL; do { /* Search for STX at start of pkt if STX is used */ if (layr->usestx) { 
cfpkt_extr_head(pkt, &tmp8, 1); if (tmp8 != CFSERL_STX) { while (cfpkt_more(pkt) && tmp8 != CFSERL_STX) { cfpkt_extr_head(pkt, &tmp8, 1); } if (!cfpkt_more(pkt)) { cfpkt_destroy(pkt); layr->incomplete_frm = NULL; spin_unlock(&layr->sync); return -EPROTO; } } } pkt_len = cfpkt_getlen(pkt); /* * pkt_len is the accumulated length of the packet data * we have received so far. * Exit if frame doesn't hold length. */ if (pkt_len < 2) { if (layr->usestx) cfpkt_add_head(pkt, &stx, 1); layr->incomplete_frm = pkt; spin_unlock(&layr->sync); return 0; } /* * Find length of frame. * expectlen is the length we need for a full frame. */ cfpkt_peek_head(pkt, &tmp, 2); expectlen = le16_to_cpu(tmp) + 2; /* * Frame error handling */ if (expectlen < SERIAL_MINIUM_PACKET_SIZE || expectlen > SERIAL_MAX_FRAMESIZE) { if (!layr->usestx) { if (pkt != NULL) cfpkt_destroy(pkt); layr->incomplete_frm = NULL; expectlen = 0; spin_unlock(&layr->sync); return -EPROTO; } continue; } if (pkt_len < expectlen) { /* Too little received data */ if (layr->usestx) cfpkt_add_head(pkt, &stx, 1); layr->incomplete_frm = pkt; spin_unlock(&layr->sync); return 0; } /* * Enough data for at least one frame. 
* Split the frame, if too long */ if (pkt_len > expectlen) tail_pkt = cfpkt_split(pkt, expectlen); else tail_pkt = NULL; /* Send the first part of packet upwards.*/ spin_unlock(&layr->sync); ret = layr->layer.up->receive(layr->layer.up, pkt); spin_lock(&layr->sync); if (ret == -EILSEQ) { if (layr->usestx) { if (tail_pkt != NULL) pkt = cfpkt_append(pkt, tail_pkt, 0); /* Start search for next STX if frame failed */ continue; } else { cfpkt_destroy(pkt); pkt = NULL; } } pkt = tail_pkt; } while (pkt != NULL); spin_unlock(&layr->sync); return 0; } static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) { struct cfserl *layr = container_obj(layer); u8 tmp8 = CFSERL_STX; if (layr->usestx) cfpkt_add_head(newpkt, &tmp8, 1); return layer->dn->transmit(layer->dn, newpkt); } static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid) { layr->up->ctrlcmd(layr->up, ctrl, phyid); }
gpl-2.0
chacox/chaco_9195_cm-13.0
drivers/media/dvb/dvb-usb/mxl111sf-demod.c
7581
15026
/* * mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator * * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "mxl111sf-demod.h" #include "mxl111sf-reg.h" /* debug */ static int mxl111sf_demod_debug; module_param_named(debug, mxl111sf_demod_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))."); #define mxl_dbg(fmt, arg...) \ if (mxl111sf_demod_debug) \ mxl_printk(KERN_DEBUG, fmt, ##arg) /* ------------------------------------------------------------------------ */ struct mxl111sf_demod_state { struct mxl111sf_state *mxl_state; struct mxl111sf_demod_config *cfg; struct dvb_frontend fe; }; /* ------------------------------------------------------------------------ */ static int mxl111sf_demod_read_reg(struct mxl111sf_demod_state *state, u8 addr, u8 *data) { return (state->cfg->read_reg) ? state->cfg->read_reg(state->mxl_state, addr, data) : -EINVAL; } static int mxl111sf_demod_write_reg(struct mxl111sf_demod_state *state, u8 addr, u8 data) { return (state->cfg->write_reg) ? state->cfg->write_reg(state->mxl_state, addr, data) : -EINVAL; } static int mxl111sf_demod_program_regs(struct mxl111sf_demod_state *state, struct mxl111sf_reg_ctrl_info *ctrl_reg_info) { return (state->cfg->program_regs) ? 
state->cfg->program_regs(state->mxl_state, ctrl_reg_info) : -EINVAL; } /* ------------------------------------------------------------------------ */ /* TPS */ static int mxl1x1sf_demod_get_tps_code_rate(struct mxl111sf_demod_state *state, fe_code_rate_t *code_rate) { u8 val; int ret = mxl111sf_demod_read_reg(state, V6_CODE_RATE_TPS_REG, &val); /* bit<2:0> - 000:1/2, 001:2/3, 010:3/4, 011:5/6, 100:7/8 */ if (mxl_fail(ret)) goto fail; switch (val & V6_CODE_RATE_TPS_MASK) { case 0: *code_rate = FEC_1_2; break; case 1: *code_rate = FEC_2_3; break; case 2: *code_rate = FEC_3_4; break; case 3: *code_rate = FEC_5_6; break; case 4: *code_rate = FEC_7_8; break; } fail: return ret; } static int mxl1x1sf_demod_get_tps_modulation(struct mxl111sf_demod_state *state, fe_modulation_t *modulation) { u8 val; int ret = mxl111sf_demod_read_reg(state, V6_MODORDER_TPS_REG, &val); /* Constellation, 00 : QPSK, 01 : 16QAM, 10:64QAM */ if (mxl_fail(ret)) goto fail; switch ((val & V6_PARAM_CONSTELLATION_MASK) >> 4) { case 0: *modulation = QPSK; break; case 1: *modulation = QAM_16; break; case 2: *modulation = QAM_64; break; } fail: return ret; } static int mxl1x1sf_demod_get_tps_guard_fft_mode(struct mxl111sf_demod_state *state, fe_transmit_mode_t *fft_mode) { u8 val; int ret = mxl111sf_demod_read_reg(state, V6_MODE_TPS_REG, &val); /* FFT Mode, 00:2K, 01:8K, 10:4K */ if (mxl_fail(ret)) goto fail; switch ((val & V6_PARAM_FFT_MODE_MASK) >> 2) { case 0: *fft_mode = TRANSMISSION_MODE_2K; break; case 1: *fft_mode = TRANSMISSION_MODE_8K; break; case 2: *fft_mode = TRANSMISSION_MODE_4K; break; } fail: return ret; } static int mxl1x1sf_demod_get_tps_guard_interval(struct mxl111sf_demod_state *state, fe_guard_interval_t *guard) { u8 val; int ret = mxl111sf_demod_read_reg(state, V6_CP_TPS_REG, &val); /* 00:1/32, 01:1/16, 10:1/8, 11:1/4 */ if (mxl_fail(ret)) goto fail; switch ((val & V6_PARAM_GI_MASK) >> 4) { case 0: *guard = GUARD_INTERVAL_1_32; break; case 1: *guard = GUARD_INTERVAL_1_16; break; 
case 2: *guard = GUARD_INTERVAL_1_8; break; case 3: *guard = GUARD_INTERVAL_1_4; break; } fail: return ret; } static int mxl1x1sf_demod_get_tps_hierarchy(struct mxl111sf_demod_state *state, fe_hierarchy_t *hierarchy) { u8 val; int ret = mxl111sf_demod_read_reg(state, V6_TPS_HIERACHY_REG, &val); /* bit<6:4> - 000:Non hierarchy, 001:1, 010:2, 011:4 */ if (mxl_fail(ret)) goto fail; switch ((val & V6_TPS_HIERARCHY_INFO_MASK) >> 6) { case 0: *hierarchy = HIERARCHY_NONE; break; case 1: *hierarchy = HIERARCHY_1; break; case 2: *hierarchy = HIERARCHY_2; break; case 3: *hierarchy = HIERARCHY_4; break; } fail: return ret; } /* ------------------------------------------------------------------------ */ /* LOCKS */ static int mxl1x1sf_demod_get_sync_lock_status(struct mxl111sf_demod_state *state, int *sync_lock) { u8 val = 0; int ret = mxl111sf_demod_read_reg(state, V6_SYNC_LOCK_REG, &val); if (mxl_fail(ret)) goto fail; *sync_lock = (val & SYNC_LOCK_MASK) >> 4; fail: return ret; } static int mxl1x1sf_demod_get_rs_lock_status(struct mxl111sf_demod_state *state, int *rs_lock) { u8 val = 0; int ret = mxl111sf_demod_read_reg(state, V6_RS_LOCK_DET_REG, &val); if (mxl_fail(ret)) goto fail; *rs_lock = (val & RS_LOCK_DET_MASK) >> 3; fail: return ret; } static int mxl1x1sf_demod_get_tps_lock_status(struct mxl111sf_demod_state *state, int *tps_lock) { u8 val = 0; int ret = mxl111sf_demod_read_reg(state, V6_TPS_LOCK_REG, &val); if (mxl_fail(ret)) goto fail; *tps_lock = (val & V6_PARAM_TPS_LOCK_MASK) >> 6; fail: return ret; } static int mxl1x1sf_demod_get_fec_lock_status(struct mxl111sf_demod_state *state, int *fec_lock) { u8 val = 0; int ret = mxl111sf_demod_read_reg(state, V6_IRQ_STATUS_REG, &val); if (mxl_fail(ret)) goto fail; *fec_lock = (val & IRQ_MASK_FEC_LOCK) >> 4; fail: return ret; } #if 0 static int mxl1x1sf_demod_get_cp_lock_status(struct mxl111sf_demod_state *state, int *cp_lock) { u8 val = 0; int ret = mxl111sf_demod_read_reg(state, V6_CP_LOCK_DET_REG, &val); if 
(mxl_fail(ret)) goto fail; *cp_lock = (val & V6_CP_LOCK_DET_MASK) >> 2; fail: return ret; } #endif static int mxl1x1sf_demod_reset_irq_status(struct mxl111sf_demod_state *state) { return mxl111sf_demod_write_reg(state, 0x0e, 0xff); } /* ------------------------------------------------------------------------ */ static int mxl111sf_demod_set_frontend(struct dvb_frontend *fe) { struct mxl111sf_demod_state *state = fe->demodulator_priv; int ret = 0; struct mxl111sf_reg_ctrl_info phy_pll_patch[] = { {0x00, 0xff, 0x01}, /* change page to 1 */ {0x40, 0xff, 0x05}, {0x40, 0xff, 0x01}, {0x41, 0xff, 0xca}, {0x41, 0xff, 0xc0}, {0x00, 0xff, 0x00}, /* change page to 0 */ {0, 0, 0} }; mxl_dbg("()"); if (fe->ops.tuner_ops.set_params) { ret = fe->ops.tuner_ops.set_params(fe); if (mxl_fail(ret)) goto fail; msleep(50); } ret = mxl111sf_demod_program_regs(state, phy_pll_patch); mxl_fail(ret); msleep(50); ret = mxl1x1sf_demod_reset_irq_status(state); mxl_fail(ret); msleep(100); fail: return ret; } /* ------------------------------------------------------------------------ */ #if 0 /* resets TS Packet error count */ /* After setting 7th bit of V5_PER_COUNT_RESET_REG, it should be reset to 0. 
*/ static int mxl1x1sf_demod_reset_packet_error_count(struct mxl111sf_demod_state *state) { struct mxl111sf_reg_ctrl_info reset_per_count[] = { {0x20, 0x01, 0x01}, {0x20, 0x01, 0x00}, {0, 0, 0} }; return mxl111sf_demod_program_regs(state, reset_per_count); } #endif /* returns TS Packet error count */ /* PER Count = FEC_PER_COUNT * (2 ** (FEC_PER_SCALE * 4)) */ static int mxl111sf_demod_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct mxl111sf_demod_state *state = fe->demodulator_priv; u32 fec_per_count, fec_per_scale; u8 val; int ret; *ucblocks = 0; /* FEC_PER_COUNT Register */ ret = mxl111sf_demod_read_reg(state, V6_FEC_PER_COUNT_REG, &val); if (mxl_fail(ret)) goto fail; fec_per_count = val; /* FEC_PER_SCALE Register */ ret = mxl111sf_demod_read_reg(state, V6_FEC_PER_SCALE_REG, &val); if (mxl_fail(ret)) goto fail; val &= V6_FEC_PER_SCALE_MASK; val *= 4; fec_per_scale = 1 << val; fec_per_count *= fec_per_scale; *ucblocks = fec_per_count; fail: return ret; } #ifdef MXL111SF_DEMOD_ENABLE_CALCULATIONS /* FIXME: leaving this enabled breaks the build on some architectures, * and we shouldn't have any floating point math in the kernel, anyway. * * These macros need to be re-written, but it's harmless to simply * return zero for now. 
*/ #define CALCULATE_BER(avg_errors, count) \ ((u32)(avg_errors * 4)/(count*64*188*8)) #define CALCULATE_SNR(data) \ ((u32)((10 * (u32)data / 64) - 2.5)) #else #define CALCULATE_BER(avg_errors, count) 0 #define CALCULATE_SNR(data) 0 #endif static int mxl111sf_demod_read_ber(struct dvb_frontend *fe, u32 *ber) { struct mxl111sf_demod_state *state = fe->demodulator_priv; u8 val1, val2, val3; int ret; *ber = 0; ret = mxl111sf_demod_read_reg(state, V6_RS_AVG_ERRORS_LSB_REG, &val1); if (mxl_fail(ret)) goto fail; ret = mxl111sf_demod_read_reg(state, V6_RS_AVG_ERRORS_MSB_REG, &val2); if (mxl_fail(ret)) goto fail; ret = mxl111sf_demod_read_reg(state, V6_N_ACCUMULATE_REG, &val3); if (mxl_fail(ret)) goto fail; *ber = CALCULATE_BER((val1 | (val2 << 8)), val3); fail: return ret; } static int mxl111sf_demod_calc_snr(struct mxl111sf_demod_state *state, u16 *snr) { u8 val1, val2; int ret; *snr = 0; ret = mxl111sf_demod_read_reg(state, V6_SNR_RB_LSB_REG, &val1); if (mxl_fail(ret)) goto fail; ret = mxl111sf_demod_read_reg(state, V6_SNR_RB_MSB_REG, &val2); if (mxl_fail(ret)) goto fail; *snr = CALCULATE_SNR(val1 | ((val2 & 0x03) << 8)); fail: return ret; } static int mxl111sf_demod_read_snr(struct dvb_frontend *fe, u16 *snr) { struct mxl111sf_demod_state *state = fe->demodulator_priv; int ret = mxl111sf_demod_calc_snr(state, snr); if (mxl_fail(ret)) goto fail; *snr /= 10; /* 0.1 dB */ fail: return ret; } static int mxl111sf_demod_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct mxl111sf_demod_state *state = fe->demodulator_priv; int ret, locked, cr_lock, sync_lock, fec_lock; *status = 0; ret = mxl1x1sf_demod_get_rs_lock_status(state, &locked); if (mxl_fail(ret)) goto fail; ret = mxl1x1sf_demod_get_tps_lock_status(state, &cr_lock); if (mxl_fail(ret)) goto fail; ret = mxl1x1sf_demod_get_sync_lock_status(state, &sync_lock); if (mxl_fail(ret)) goto fail; ret = mxl1x1sf_demod_get_fec_lock_status(state, &fec_lock); if (mxl_fail(ret)) goto fail; if (locked) *status |= 
FE_HAS_SIGNAL; if (cr_lock) *status |= FE_HAS_CARRIER; if (sync_lock) *status |= FE_HAS_SYNC; if (fec_lock) /* false positives? */ *status |= FE_HAS_VITERBI; if ((locked) && (cr_lock) && (sync_lock)) *status |= FE_HAS_LOCK; fail: return ret; } static int mxl111sf_demod_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { struct mxl111sf_demod_state *state = fe->demodulator_priv; fe_modulation_t modulation; u16 snr; mxl111sf_demod_calc_snr(state, &snr); mxl1x1sf_demod_get_tps_modulation(state, &modulation); switch (modulation) { case QPSK: *signal_strength = (snr >= 1300) ? min(65535, snr * 44) : snr * 38; break; case QAM_16: *signal_strength = (snr >= 1500) ? min(65535, snr * 38) : snr * 33; break; case QAM_64: *signal_strength = (snr >= 2000) ? min(65535, snr * 29) : snr * 25; break; default: *signal_strength = 0; return -EINVAL; } return 0; } static int mxl111sf_demod_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct mxl111sf_demod_state *state = fe->demodulator_priv; mxl_dbg("()"); #if 0 p->inversion = /* FIXME */ ? 
INVERSION_ON : INVERSION_OFF; #endif if (fe->ops.tuner_ops.get_bandwidth) fe->ops.tuner_ops.get_bandwidth(fe, &p->bandwidth_hz); if (fe->ops.tuner_ops.get_frequency) fe->ops.tuner_ops.get_frequency(fe, &p->frequency); mxl1x1sf_demod_get_tps_code_rate(state, &p->code_rate_HP); mxl1x1sf_demod_get_tps_code_rate(state, &p->code_rate_LP); mxl1x1sf_demod_get_tps_modulation(state, &p->modulation); mxl1x1sf_demod_get_tps_guard_fft_mode(state, &p->transmission_mode); mxl1x1sf_demod_get_tps_guard_interval(state, &p->guard_interval); mxl1x1sf_demod_get_tps_hierarchy(state, &p->hierarchy); return 0; } static int mxl111sf_demod_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static void mxl111sf_demod_release(struct dvb_frontend *fe) { struct mxl111sf_demod_state *state = fe->demodulator_priv; mxl_dbg("()"); kfree(state); fe->demodulator_priv = NULL; } static struct dvb_frontend_ops mxl111sf_demod_ops = { .delsys = { SYS_DVBT }, .info = { .name = "MaxLinear MxL111SF DVB-T demodulator", .frequency_min = 177000000, .frequency_max = 858000000, .frequency_stepsize = 166666, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER }, .release = mxl111sf_demod_release, #if 0 .init = mxl111sf_init, .i2c_gate_ctrl = mxl111sf_i2c_gate_ctrl, #endif .set_frontend = mxl111sf_demod_set_frontend, .get_frontend = mxl111sf_demod_get_frontend, .get_tune_settings = mxl111sf_demod_get_tune_settings, .read_status = mxl111sf_demod_read_status, .read_signal_strength = mxl111sf_demod_read_signal_strength, .read_ber = mxl111sf_demod_read_ber, .read_snr = mxl111sf_demod_read_snr, .read_ucblocks = mxl111sf_demod_read_ucblocks, }; struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state, struct 
mxl111sf_demod_config *cfg) { struct mxl111sf_demod_state *state = NULL; mxl_dbg("()"); state = kzalloc(sizeof(struct mxl111sf_demod_state), GFP_KERNEL); if (state == NULL) return NULL; state->mxl_state = mxl_state; state->cfg = cfg; memcpy(&state->fe.ops, &mxl111sf_demod_ops, sizeof(struct dvb_frontend_ops)); state->fe.demodulator_priv = state; return &state->fe; } EXPORT_SYMBOL_GPL(mxl111sf_demod_attach); MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver"); MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
javilonas/Thoth-GT-I9300-Sammy
net/l2tp/l2tp_netlink.c
7837
21384
/* * L2TP netlink layer, for management * * Copyright (c) 2008,2009,2010 Katalix Systems Ltd * * Partly based on the IrDA nelink implementation * (see net/irda/irnetlink.c) which is: * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org> * which is in turn partly based on the wireless netlink code: * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <net/sock.h> #include <net/genetlink.h> #include <net/udp.h> #include <linux/in.h> #include <linux/udp.h> #include <linux/socket.h> #include <linux/module.h> #include <linux/list.h> #include <net/net_namespace.h> #include <linux/l2tp.h> #include "l2tp_core.h" static struct genl_family l2tp_nl_family = { .id = GENL_ID_GENERATE, .name = L2TP_GENL_NAME, .version = L2TP_GENL_VERSION, .hdrsize = 0, .maxattr = L2TP_ATTR_MAX, }; /* Accessed under genl lock */ static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) { u32 tunnel_id; u32 session_id; char *ifname; struct l2tp_tunnel *tunnel; struct l2tp_session *session = NULL; struct net *net = genl_info_net(info); if (info->attrs[L2TP_ATTR_IFNAME]) { ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); session = l2tp_session_find_by_ifname(net, ifname); } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && (info->attrs[L2TP_ATTR_CONN_ID])) { tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel) session = l2tp_session_find(net, tunnel, session_id); } return session; } static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; void *hdr; int ret = -ENOBUFS; msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto out; } hdr = 
genlmsg_put(msg, info->snd_pid, info->snd_seq, &l2tp_nl_family, 0, L2TP_CMD_NOOP); if (IS_ERR(hdr)) { ret = PTR_ERR(hdr); goto err_out; } genlmsg_end(msg, hdr); return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); err_out: nlmsg_free(msg); out: return ret; } static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info) { u32 tunnel_id; u32 peer_tunnel_id; int proto_version; int fd; int ret = 0; struct l2tp_tunnel_cfg cfg = { 0, }; struct l2tp_tunnel *tunnel; struct net *net = genl_info_net(info); if (!info->attrs[L2TP_ATTR_CONN_ID]) { ret = -EINVAL; goto out; } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) { ret = -EINVAL; goto out; } peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]); if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) { ret = -EINVAL; goto out; } proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]); if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) { ret = -EINVAL; goto out; } cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]); fd = -1; if (info->attrs[L2TP_ATTR_FD]) { fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]); } else { if (info->attrs[L2TP_ATTR_IP_SADDR]) cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]); if (info->attrs[L2TP_ATTR_IP_DADDR]) cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]); if (info->attrs[L2TP_ATTR_UDP_SPORT]) cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]); if (info->attrs[L2TP_ATTR_UDP_DPORT]) cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]); if (info->attrs[L2TP_ATTR_UDP_CSUM]) cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]); } if (info->attrs[L2TP_ATTR_DEBUG]) cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel != NULL) { ret = -EEXIST; goto out; } ret = -EINVAL; switch (cfg.encap) { case L2TP_ENCAPTYPE_UDP: case L2TP_ENCAPTYPE_IP: ret = l2tp_tunnel_create(net, fd, 
proto_version, tunnel_id, peer_tunnel_id, &cfg, &tunnel); break; } out: return ret; } static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info) { struct l2tp_tunnel *tunnel; u32 tunnel_id; int ret = 0; struct net *net = genl_info_net(info); if (!info->attrs[L2TP_ATTR_CONN_ID]) { ret = -EINVAL; goto out; } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel == NULL) { ret = -ENODEV; goto out; } (void) l2tp_tunnel_delete(tunnel); out: return ret; } static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info) { struct l2tp_tunnel *tunnel; u32 tunnel_id; int ret = 0; struct net *net = genl_info_net(info); if (!info->attrs[L2TP_ATTR_CONN_ID]) { ret = -EINVAL; goto out; } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel == NULL) { ret = -ENODEV; goto out; } if (info->attrs[L2TP_ATTR_DEBUG]) tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); out: return ret; } static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, struct l2tp_tunnel *tunnel) { void *hdr; struct nlattr *nest; struct sock *sk = NULL; struct inet_sock *inet; hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_TUNNEL_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version); NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug); NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap); nest = nla_nest_start(skb, L2TP_ATTR_STATS); if (nest == NULL) goto nla_put_failure; NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets); NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes); NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors); NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets); NLA_PUT_U64(skb, 
L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes); NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards); NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets); NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors); nla_nest_end(skb, nest); sk = tunnel->sock; if (!sk) goto out; inet = inet_sk(sk); switch (tunnel->encap) { case L2TP_ENCAPTYPE_UDP: NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)); NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)); NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT)); /* NOBREAK */ case L2TP_ENCAPTYPE_IP: NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr); NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr); break; } out: return genlmsg_end(skb, hdr); nla_put_failure: genlmsg_cancel(skb, hdr); return -1; } static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info) { struct l2tp_tunnel *tunnel; struct sk_buff *msg; u32 tunnel_id; int ret = -ENOBUFS; struct net *net = genl_info_net(info); if (!info->attrs[L2TP_ATTR_CONN_ID]) { ret = -EINVAL; goto out; } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel == NULL) { ret = -ENODEV; goto out; } msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto out; } ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq, NLM_F_ACK, tunnel); if (ret < 0) goto err_out; return genlmsg_unicast(net, msg, info->snd_pid); err_out: nlmsg_free(msg); out: return ret; } static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb) { int ti = cb->args[0]; struct l2tp_tunnel *tunnel; struct net *net = sock_net(skb->sk); for (;;) { tunnel = l2tp_tunnel_find_nth(net, ti); if (tunnel == NULL) goto out; if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, tunnel) <= 0) goto out; ti++; } out: cb->args[0] = ti; return skb->len; } static int 
l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info) { u32 tunnel_id = 0; u32 session_id; u32 peer_session_id; int ret = 0; struct l2tp_tunnel *tunnel; struct l2tp_session *session; struct l2tp_session_cfg cfg = { 0, }; struct net *net = genl_info_net(info); if (!info->attrs[L2TP_ATTR_CONN_ID]) { ret = -EINVAL; goto out; } tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (!tunnel) { ret = -ENODEV; goto out; } if (!info->attrs[L2TP_ATTR_SESSION_ID]) { ret = -EINVAL; goto out; } session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); session = l2tp_session_find(net, tunnel, session_id); if (session) { ret = -EEXIST; goto out; } if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) { ret = -EINVAL; goto out; } peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]); if (!info->attrs[L2TP_ATTR_PW_TYPE]) { ret = -EINVAL; goto out; } cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]); if (cfg.pw_type >= __L2TP_PWTYPE_MAX) { ret = -EINVAL; goto out; } if (tunnel->version > 2) { if (info->attrs[L2TP_ATTR_OFFSET]) cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]); if (info->attrs[L2TP_ATTR_DATA_SEQ]) cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT; if (info->attrs[L2TP_ATTR_L2SPEC_TYPE]) cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]); cfg.l2specific_len = 4; if (info->attrs[L2TP_ATTR_L2SPEC_LEN]) cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]); if (info->attrs[L2TP_ATTR_COOKIE]) { u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); if (len > 8) { ret = -EINVAL; goto out; } cfg.cookie_len = len; memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len); } if (info->attrs[L2TP_ATTR_PEER_COOKIE]) { u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); if (len > 8) { ret = -EINVAL; goto out; } cfg.peer_cookie_len = len; memcpy(&cfg.peer_cookie[0], 
nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len); } if (info->attrs[L2TP_ATTR_IFNAME]) cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); if (info->attrs[L2TP_ATTR_VLAN_ID]) cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]); } if (info->attrs[L2TP_ATTR_DEBUG]) cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); if (info->attrs[L2TP_ATTR_RECV_SEQ]) cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); if (info->attrs[L2TP_ATTR_SEND_SEQ]) cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); if (info->attrs[L2TP_ATTR_LNS_MODE]) cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); if (info->attrs[L2TP_ATTR_MTU]) cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); if (info->attrs[L2TP_ATTR_MRU]) cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { ret = -EPROTONOSUPPORT; goto out; } /* Check that pseudowire-specific params are present */ switch (cfg.pw_type) { case L2TP_PWTYPE_NONE: break; case L2TP_PWTYPE_ETH_VLAN: if (!info->attrs[L2TP_ATTR_VLAN_ID]) { ret = -EINVAL; goto out; } break; case L2TP_PWTYPE_ETH: break; case L2TP_PWTYPE_PPP: case L2TP_PWTYPE_PPP_AC: break; case L2TP_PWTYPE_IP: default: ret = -EPROTONOSUPPORT; break; } ret = -EPROTONOSUPPORT; if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create) ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id, session_id, peer_session_id, &cfg); out: return ret; } static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info) { int ret = 0; struct l2tp_session *session; u16 pw_type; session = l2tp_nl_session_find(info); if (session == NULL) { ret = -ENODEV; goto out; } pw_type = session->pwtype; if (pw_type < __L2TP_PWTYPE_MAX) if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) ret = 
(*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); out: return ret; } static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info) { int ret = 0; struct l2tp_session *session; session = l2tp_nl_session_find(info); if (session == NULL) { ret = -ENODEV; goto out; } if (info->attrs[L2TP_ATTR_DEBUG]) session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); if (info->attrs[L2TP_ATTR_DATA_SEQ]) session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); if (info->attrs[L2TP_ATTR_RECV_SEQ]) session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); if (info->attrs[L2TP_ATTR_SEND_SEQ]) session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); if (info->attrs[L2TP_ATTR_LNS_MODE]) session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); if (info->attrs[L2TP_ATTR_RECV_TIMEOUT]) session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); if (info->attrs[L2TP_ATTR_MTU]) session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]); if (info->attrs[L2TP_ATTR_MRU]) session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]); out: return ret; } static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, struct l2tp_session *session) { void *hdr; struct nlattr *nest; struct l2tp_tunnel *tunnel = session->tunnel; struct sock *sk = NULL; sk = tunnel->sock; hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id); NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id); NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug); NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype); NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu); if (session->mru) NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru); if (session->ifname && session->ifname[0]) NLA_PUT_STRING(skb, 
L2TP_ATTR_IFNAME, session->ifname); if (session->cookie_len) NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]); if (session->peer_cookie_len) NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]); NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq); NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq); NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode); #ifdef CONFIG_XFRM if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1); #endif if (session->reorder_timeout) NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout); nest = nla_nest_start(skb, L2TP_ATTR_STATS); if (nest == NULL) goto nla_put_failure; NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets); NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes); NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors); NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets); NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes); NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards); NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets); NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors); nla_nest_end(skb, nest); return genlmsg_end(skb, hdr); nla_put_failure: genlmsg_cancel(skb, hdr); return -1; } static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) { struct l2tp_session *session; struct sk_buff *msg; int ret; session = l2tp_nl_session_find(info); if (session == NULL) { ret = -ENODEV; goto out; } msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto out; } ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq, 0, session); if (ret < 0) goto err_out; return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); err_out: nlmsg_free(msg); out: return ret; } static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb) { 
struct net *net = sock_net(skb->sk); struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; int ti = cb->args[0]; int si = cb->args[1]; for (;;) { if (tunnel == NULL) { tunnel = l2tp_tunnel_find_nth(net, ti); if (tunnel == NULL) goto out; } session = l2tp_session_find_nth(tunnel, si); if (session == NULL) { ti++; tunnel = NULL; si = 0; continue; } if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, session) <= 0) break; si++; } out: cb->args[0] = ti; cb->args[1] = si; return skb->len; } static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = { [L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, }, [L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, }, [L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, }, [L2TP_ATTR_OFFSET] = { .type = NLA_U16, }, [L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, }, [L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, }, [L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, }, [L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, }, [L2TP_ATTR_CONN_ID] = { .type = NLA_U32, }, [L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, }, [L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, }, [L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, }, [L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, }, [L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, }, [L2TP_ATTR_DEBUG] = { .type = NLA_U32, }, [L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, }, [L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, }, [L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, }, [L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, }, [L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, }, [L2TP_ATTR_FD] = { .type = NLA_U32, }, [L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, }, [L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, }, [L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, }, [L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, }, [L2TP_ATTR_MTU] = { .type = NLA_U16, }, [L2TP_ATTR_MRU] = { .type = NLA_U16, }, [L2TP_ATTR_STATS] = { .type = NLA_NESTED, }, [L2TP_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1, }, [L2TP_ATTR_COOKIE] = { .type = NLA_BINARY, .len = 8, }, 
[L2TP_ATTR_PEER_COOKIE] = { .type = NLA_BINARY, .len = 8, }, }; static struct genl_ops l2tp_nl_ops[] = { { .cmd = L2TP_CMD_NOOP, .doit = l2tp_nl_cmd_noop, .policy = l2tp_nl_policy, /* can be retrieved by unprivileged users */ }, { .cmd = L2TP_CMD_TUNNEL_CREATE, .doit = l2tp_nl_cmd_tunnel_create, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_TUNNEL_DELETE, .doit = l2tp_nl_cmd_tunnel_delete, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_TUNNEL_MODIFY, .doit = l2tp_nl_cmd_tunnel_modify, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_TUNNEL_GET, .doit = l2tp_nl_cmd_tunnel_get, .dumpit = l2tp_nl_cmd_tunnel_dump, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_CREATE, .doit = l2tp_nl_cmd_session_create, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_DELETE, .doit = l2tp_nl_cmd_session_delete, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_MODIFY, .doit = l2tp_nl_cmd_session_modify, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = L2TP_CMD_SESSION_GET, .doit = l2tp_nl_cmd_session_get, .dumpit = l2tp_nl_cmd_session_dump, .policy = l2tp_nl_policy, .flags = GENL_ADMIN_PERM, }, }; int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops) { int ret; ret = -EINVAL; if (pw_type >= __L2TP_PWTYPE_MAX) goto err; genl_lock(); ret = -EBUSY; if (l2tp_nl_cmd_ops[pw_type]) goto out; l2tp_nl_cmd_ops[pw_type] = ops; ret = 0; out: genl_unlock(); err: return ret; } EXPORT_SYMBOL_GPL(l2tp_nl_register_ops); void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type) { if (pw_type < __L2TP_PWTYPE_MAX) { genl_lock(); l2tp_nl_cmd_ops[pw_type] = NULL; genl_unlock(); } } EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops); static int l2tp_nl_init(void) { int err; printk(KERN_INFO "L2TP netlink interface\n"); err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops, 
ARRAY_SIZE(l2tp_nl_ops)); return err; } static void l2tp_nl_cleanup(void) { genl_unregister_family(&l2tp_nl_family); } module_init(l2tp_nl_init); module_exit(l2tp_nl_cleanup); MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); MODULE_DESCRIPTION("L2TP netlink"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0"); MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \ __stringify(NETLINK_GENERIC) "-type-" "l2tp");
gpl-2.0
agat63/AGAT_GB27_kernel
fs/afs/rxrpc.c
9373
20670
/* Maintain an RxRPC server socket to do AFS communications through * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/slab.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include <rxrpc/packet.h> #include "internal.h" #include "afs_cm.h" static struct socket *afs_socket; /* my RxRPC socket */ static struct workqueue_struct *afs_async_calls; static atomic_t afs_outstanding_calls; static atomic_t afs_outstanding_skbs; static void afs_wake_up_call_waiter(struct afs_call *); static int afs_wait_for_call_to_complete(struct afs_call *); static void afs_wake_up_async_call(struct afs_call *); static int afs_dont_wait_for_call_to_complete(struct afs_call *); static void afs_process_async_call(struct work_struct *); static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *); static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool); /* synchronous call management */ const struct afs_wait_mode afs_sync_call = { .rx_wakeup = afs_wake_up_call_waiter, .wait = afs_wait_for_call_to_complete, }; /* asynchronous call management */ const struct afs_wait_mode afs_async_call = { .rx_wakeup = afs_wake_up_async_call, .wait = afs_dont_wait_for_call_to_complete, }; /* asynchronous incoming call management */ static const struct afs_wait_mode afs_async_incoming_call = { .rx_wakeup = afs_wake_up_async_call, }; /* asynchronous incoming call initial processing */ static const struct afs_call_type afs_RXCMxxxx = { .name = "CB.xxxx", .deliver = afs_deliver_cm_op_id, .abort_to_error = afs_abort_to_error, }; static void afs_collect_incoming_call(struct work_struct *); static struct sk_buff_head afs_incoming_calls; static 
DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call); /* * open an RxRPC socket and bind it to be a server for callback notifications * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT */ int afs_open_socket(void) { struct sockaddr_rxrpc srx; struct socket *socket; int ret; _enter(""); skb_queue_head_init(&afs_incoming_calls); afs_async_calls = create_singlethread_workqueue("kafsd"); if (!afs_async_calls) { _leave(" = -ENOMEM [wq]"); return -ENOMEM; } ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &socket); if (ret < 0) { destroy_workqueue(afs_async_calls); _leave(" = %d [socket]", ret); return ret; } socket->sk->sk_allocation = GFP_NOFS; /* bind the callback manager's address to make this a server socket */ srx.srx_family = AF_RXRPC; srx.srx_service = CM_SERVICE; srx.transport_type = SOCK_DGRAM; srx.transport_len = sizeof(srx.transport.sin); srx.transport.sin.sin_family = AF_INET; srx.transport.sin.sin_port = htons(AFS_CM_PORT); memset(&srx.transport.sin.sin_addr, 0, sizeof(srx.transport.sin.sin_addr)); ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx)); if (ret < 0) { sock_release(socket); destroy_workqueue(afs_async_calls); _leave(" = %d [bind]", ret); return ret; } rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor); afs_socket = socket; _leave(" = 0"); return 0; } /* * close the RxRPC socket AFS was using */ void afs_close_socket(void) { _enter(""); sock_release(afs_socket); _debug("dework"); destroy_workqueue(afs_async_calls); ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0); ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0); _leave(""); } /* * note that the data in a socket buffer is now delivered and that the buffer * should be freed */ static void afs_data_delivered(struct sk_buff *skb) { if (!skb) { _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs)); dump_stack(); } else { _debug("DLVR %p{%u} [%d]", skb, skb->mark, atomic_read(&afs_outstanding_skbs)); if 
(atomic_dec_return(&afs_outstanding_skbs) == -1) BUG(); rxrpc_kernel_data_delivered(skb); } } /* * free a socket buffer */ static void afs_free_skb(struct sk_buff *skb) { if (!skb) { _debug("FREE NULL [%d]", atomic_read(&afs_outstanding_skbs)); dump_stack(); } else { _debug("FREE %p{%u} [%d]", skb, skb->mark, atomic_read(&afs_outstanding_skbs)); if (atomic_dec_return(&afs_outstanding_skbs) == -1) BUG(); rxrpc_kernel_free_skb(skb); } } /* * free a call */ static void afs_free_call(struct afs_call *call) { _debug("DONE %p{%s} [%d]", call, call->type->name, atomic_read(&afs_outstanding_calls)); if (atomic_dec_return(&afs_outstanding_calls) == -1) BUG(); ASSERTCMP(call->rxcall, ==, NULL); ASSERT(!work_pending(&call->async_work)); ASSERT(skb_queue_empty(&call->rx_queue)); ASSERT(call->type->name != NULL); kfree(call->request); kfree(call); } /* * allocate a call with flat request and reply buffers */ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type, size_t request_size, size_t reply_size) { struct afs_call *call; call = kzalloc(sizeof(*call), GFP_NOFS); if (!call) goto nomem_call; _debug("CALL %p{%s} [%d]", call, type->name, atomic_read(&afs_outstanding_calls)); atomic_inc(&afs_outstanding_calls); call->type = type; call->request_size = request_size; call->reply_max = reply_size; if (request_size) { call->request = kmalloc(request_size, GFP_NOFS); if (!call->request) goto nomem_free; } if (reply_size) { call->buffer = kmalloc(reply_size, GFP_NOFS); if (!call->buffer) goto nomem_free; } init_waitqueue_head(&call->waitq); skb_queue_head_init(&call->rx_queue); return call; nomem_free: afs_free_call(call); nomem_call: return NULL; } /* * clean up a call with flat buffer */ void afs_flat_call_destructor(struct afs_call *call) { _enter(""); kfree(call->request); call->request = NULL; kfree(call->buffer); call->buffer = NULL; } /* * attach the data from a bunch of pages on an inode to a call */ static int afs_send_pages(struct afs_call *call, struct msghdr 
*msg, struct kvec *iov) { struct page *pages[8]; unsigned count, n, loop, offset, to; pgoff_t first = call->first, last = call->last; int ret; _enter(""); offset = call->first_offset; call->first_offset = 0; do { _debug("attach %lx-%lx", first, last); count = last - first + 1; if (count > ARRAY_SIZE(pages)) count = ARRAY_SIZE(pages); n = find_get_pages_contig(call->mapping, first, count, pages); ASSERTCMP(n, ==, count); loop = 0; do { msg->msg_flags = 0; to = PAGE_SIZE; if (first + loop >= last) to = call->last_to; else msg->msg_flags = MSG_MORE; iov->iov_base = kmap(pages[loop]) + offset; iov->iov_len = to - offset; offset = 0; _debug("- range %u-%u%s", offset, to, msg->msg_flags ? " [more]" : ""); msg->msg_iov = (struct iovec *) iov; msg->msg_iovlen = 1; /* have to change the state *before* sending the last * packet as RxRPC might give us the reply before it * returns from sending the request */ if (first + loop >= last) call->state = AFS_CALL_AWAIT_REPLY; ret = rxrpc_kernel_send_data(call->rxcall, msg, to - offset); kunmap(pages[loop]); if (ret < 0) break; } while (++loop < count); first += count; for (loop = 0; loop < count; loop++) put_page(pages[loop]); if (ret < 0) break; } while (first <= last); _leave(" = %d", ret); return ret; } /* * initiate a call */ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, const struct afs_wait_mode *wait_mode) { struct sockaddr_rxrpc srx; struct rxrpc_call *rxcall; struct msghdr msg; struct kvec iov[1]; int ret; struct sk_buff *skb; _enter("%x,{%d},", addr->s_addr, ntohs(call->port)); ASSERT(call->type != NULL); ASSERT(call->type->name != NULL); _debug("____MAKE %p{%s,%x} [%d]____", call, call->type->name, key_serial(call->key), atomic_read(&afs_outstanding_calls)); call->wait_mode = wait_mode; INIT_WORK(&call->async_work, afs_process_async_call); memset(&srx, 0, sizeof(srx)); srx.srx_family = AF_RXRPC; srx.srx_service = call->service_id; srx.transport_type = SOCK_DGRAM; srx.transport_len = 
sizeof(srx.transport.sin); srx.transport.sin.sin_family = AF_INET; srx.transport.sin.sin_port = call->port; memcpy(&srx.transport.sin.sin_addr, addr, 4); /* create a call */ rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key, (unsigned long) call, gfp); call->key = NULL; if (IS_ERR(rxcall)) { ret = PTR_ERR(rxcall); goto error_kill_call; } call->rxcall = rxcall; /* send the request */ iov[0].iov_base = call->request; iov[0].iov_len = call->request_size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = (struct iovec *) iov; msg.msg_iovlen = 1; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = (call->send_pages ? MSG_MORE : 0); /* have to change the state *before* sending the last packet as RxRPC * might give us the reply before it returns from sending the * request */ if (!call->send_pages) call->state = AFS_CALL_AWAIT_REPLY; ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size); if (ret < 0) goto error_do_abort; if (call->send_pages) { ret = afs_send_pages(call, &msg, iov); if (ret < 0) goto error_do_abort; } /* at this point, an async call may no longer exist as it may have * already completed */ return wait_mode->wait(call); error_do_abort: rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT); while ((skb = skb_dequeue(&call->rx_queue))) afs_free_skb(skb); rxrpc_kernel_end_call(rxcall); call->rxcall = NULL; error_kill_call: call->type->destructor(call); afs_free_call(call); _leave(" = %d", ret); return ret; } /* * handles intercepted messages that were arriving in the socket's Rx queue * - called with the socket receive queue lock held to ensure message ordering * - called with softirqs disabled */ static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID, struct sk_buff *skb) { struct afs_call *call = (struct afs_call *) user_call_ID; _enter("%p,,%u", call, skb->mark); _debug("ICPT %p{%u} [%d]", skb, skb->mark, atomic_read(&afs_outstanding_skbs)); ASSERTCMP(sk, ==, afs_socket->sk); 
atomic_inc(&afs_outstanding_skbs); if (!call) { /* its an incoming call for our callback service */ skb_queue_tail(&afs_incoming_calls, skb); queue_work(afs_wq, &afs_collect_incoming_call_work); } else { /* route the messages directly to the appropriate call */ skb_queue_tail(&call->rx_queue, skb); call->wait_mode->rx_wakeup(call); } _leave(""); } /* * deliver messages to a call */ static void afs_deliver_to_call(struct afs_call *call) { struct sk_buff *skb; bool last; u32 abort_code; int ret; _enter(""); while ((call->state == AFS_CALL_AWAIT_REPLY || call->state == AFS_CALL_AWAIT_OP_ID || call->state == AFS_CALL_AWAIT_REQUEST || call->state == AFS_CALL_AWAIT_ACK) && (skb = skb_dequeue(&call->rx_queue))) { switch (skb->mark) { case RXRPC_SKB_MARK_DATA: _debug("Rcv DATA"); last = rxrpc_kernel_is_data_last(skb); ret = call->type->deliver(call, skb, last); switch (ret) { case 0: if (last && call->state == AFS_CALL_AWAIT_REPLY) call->state = AFS_CALL_COMPLETE; break; case -ENOTCONN: abort_code = RX_CALL_DEAD; goto do_abort; case -ENOTSUPP: abort_code = RX_INVALID_OPERATION; goto do_abort; default: abort_code = RXGEN_CC_UNMARSHAL; if (call->state != AFS_CALL_AWAIT_REPLY) abort_code = RXGEN_SS_UNMARSHAL; do_abort: rxrpc_kernel_abort_call(call->rxcall, abort_code); call->error = ret; call->state = AFS_CALL_ERROR; break; } afs_data_delivered(skb); skb = NULL; continue; case RXRPC_SKB_MARK_FINAL_ACK: _debug("Rcv ACK"); call->state = AFS_CALL_COMPLETE; break; case RXRPC_SKB_MARK_BUSY: _debug("Rcv BUSY"); call->error = -EBUSY; call->state = AFS_CALL_BUSY; break; case RXRPC_SKB_MARK_REMOTE_ABORT: abort_code = rxrpc_kernel_get_abort_code(skb); call->error = call->type->abort_to_error(abort_code); call->state = AFS_CALL_ABORTED; _debug("Rcv ABORT %u -> %d", abort_code, call->error); break; case RXRPC_SKB_MARK_NET_ERROR: call->error = -rxrpc_kernel_get_error_number(skb); call->state = AFS_CALL_ERROR; _debug("Rcv NET ERROR %d", call->error); break; case RXRPC_SKB_MARK_LOCAL_ERROR: 
call->error = -rxrpc_kernel_get_error_number(skb); call->state = AFS_CALL_ERROR; _debug("Rcv LOCAL ERROR %d", call->error); break; default: BUG(); break; } afs_free_skb(skb); } /* make sure the queue is empty if the call is done with (we might have * aborted the call early because of an unmarshalling error) */ if (call->state >= AFS_CALL_COMPLETE) { while ((skb = skb_dequeue(&call->rx_queue))) afs_free_skb(skb); if (call->incoming) { rxrpc_kernel_end_call(call->rxcall); call->rxcall = NULL; call->type->destructor(call); afs_free_call(call); } } _leave(""); } /* * wait synchronously for a call to complete */ static int afs_wait_for_call_to_complete(struct afs_call *call) { struct sk_buff *skb; int ret; DECLARE_WAITQUEUE(myself, current); _enter(""); add_wait_queue(&call->waitq, &myself); for (;;) { set_current_state(TASK_INTERRUPTIBLE); /* deliver any messages that are in the queue */ if (!skb_queue_empty(&call->rx_queue)) { __set_current_state(TASK_RUNNING); afs_deliver_to_call(call); continue; } ret = call->error; if (call->state >= AFS_CALL_COMPLETE) break; ret = -EINTR; if (signal_pending(current)) break; schedule(); } remove_wait_queue(&call->waitq, &myself); __set_current_state(TASK_RUNNING); /* kill the call */ if (call->state < AFS_CALL_COMPLETE) { _debug("call incomplete"); rxrpc_kernel_abort_call(call->rxcall, RX_CALL_DEAD); while ((skb = skb_dequeue(&call->rx_queue))) afs_free_skb(skb); } _debug("call complete"); rxrpc_kernel_end_call(call->rxcall); call->rxcall = NULL; call->type->destructor(call); afs_free_call(call); _leave(" = %d", ret); return ret; } /* * wake up a waiting call */ static void afs_wake_up_call_waiter(struct afs_call *call) { wake_up(&call->waitq); } /* * wake up an asynchronous call */ static void afs_wake_up_async_call(struct afs_call *call) { _enter(""); queue_work(afs_async_calls, &call->async_work); } /* * put a call into asynchronous mode * - mustn't touch the call descriptor as the call my have completed by the * time we get 
here */ static int afs_dont_wait_for_call_to_complete(struct afs_call *call) { _enter(""); return -EINPROGRESS; } /* * delete an asynchronous call */ static void afs_delete_async_call(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, async_work); _enter(""); afs_free_call(call); _leave(""); } /* * perform processing on an asynchronous call * - on a multiple-thread workqueue this work item may try to run on several * CPUs at the same time */ static void afs_process_async_call(struct work_struct *work) { struct afs_call *call = container_of(work, struct afs_call, async_work); _enter(""); if (!skb_queue_empty(&call->rx_queue)) afs_deliver_to_call(call); if (call->state >= AFS_CALL_COMPLETE && call->wait_mode) { if (call->wait_mode->async_complete) call->wait_mode->async_complete(call->reply, call->error); call->reply = NULL; /* kill the call */ rxrpc_kernel_end_call(call->rxcall); call->rxcall = NULL; if (call->type->destructor) call->type->destructor(call); /* we can't just delete the call because the work item may be * queued */ PREPARE_WORK(&call->async_work, afs_delete_async_call); queue_work(afs_async_calls, &call->async_work); } _leave(""); } /* * empty a socket buffer into a flat reply buffer */ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb) { size_t len = skb->len; if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0) BUG(); call->reply_size += len; } /* * accept the backlog of incoming calls */ static void afs_collect_incoming_call(struct work_struct *work) { struct rxrpc_call *rxcall; struct afs_call *call = NULL; struct sk_buff *skb; while ((skb = skb_dequeue(&afs_incoming_calls))) { _debug("new call"); /* don't need the notification */ afs_free_skb(skb); if (!call) { call = kzalloc(sizeof(struct afs_call), GFP_KERNEL); if (!call) { rxrpc_kernel_reject_call(afs_socket); return; } INIT_WORK(&call->async_work, afs_process_async_call); call->wait_mode = &afs_async_incoming_call; 
call->type = &afs_RXCMxxxx; init_waitqueue_head(&call->waitq); skb_queue_head_init(&call->rx_queue); call->state = AFS_CALL_AWAIT_OP_ID; _debug("CALL %p{%s} [%d]", call, call->type->name, atomic_read(&afs_outstanding_calls)); atomic_inc(&afs_outstanding_calls); } rxcall = rxrpc_kernel_accept_call(afs_socket, (unsigned long) call); if (!IS_ERR(rxcall)) { call->rxcall = rxcall; call = NULL; } } if (call) afs_free_call(call); } /* * grab the operation ID from an incoming cache manager call */ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, bool last) { size_t len = skb->len; void *oibuf = (void *) &call->operation_ID; _enter("{%u},{%zu},%d", call->offset, len, last); ASSERTCMP(call->offset, <, 4); /* the operation ID forms the first four bytes of the request data */ len = min_t(size_t, len, 4 - call->offset); if (skb_copy_bits(skb, 0, oibuf + call->offset, len) < 0) BUG(); if (!pskb_pull(skb, len)) BUG(); call->offset += len; if (call->offset < 4) { if (last) { _leave(" = -EBADMSG [op ID short]"); return -EBADMSG; } _leave(" = 0 [incomplete]"); return 0; } call->state = AFS_CALL_AWAIT_REQUEST; /* ask the cache manager to route the call (it'll change the call type * if successful) */ if (!afs_cm_incoming_call(call)) return -ENOTSUPP; /* pass responsibility for the remainer of this message off to the * cache manager op */ return call->type->deliver(call, skb, last); } /* * send an empty reply */ void afs_send_empty_reply(struct afs_call *call) { struct msghdr msg; struct iovec iov[1]; _enter(""); iov[0].iov_base = NULL; iov[0].iov_len = 0; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iov; msg.msg_iovlen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; call->state = AFS_CALL_AWAIT_ACK; switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) { case 0: _leave(" [replied]"); return; case -ENOMEM: _debug("oom"); rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); default: rxrpc_kernel_end_call(call->rxcall); 
call->rxcall = NULL; call->type->destructor(call); afs_free_call(call); _leave(" [error]"); return; } } /* * send a simple reply */ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) { struct msghdr msg; struct iovec iov[1]; int n; _enter(""); iov[0].iov_base = (void *) buf; iov[0].iov_len = len; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_iov = iov; msg.msg_iovlen = 1; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; call->state = AFS_CALL_AWAIT_ACK; n = rxrpc_kernel_send_data(call->rxcall, &msg, len); if (n >= 0) { _leave(" [replied]"); return; } if (n == -ENOMEM) { _debug("oom"); rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); } rxrpc_kernel_end_call(call->rxcall); call->rxcall = NULL; call->type->destructor(call); afs_free_call(call); _leave(" [error]"); } /* * extract a piece of data from the received data socket buffers */ int afs_extract_data(struct afs_call *call, struct sk_buff *skb, bool last, void *buf, size_t count) { size_t len = skb->len; _enter("{%u},{%zu},%d,,%zu", call->offset, len, last, count); ASSERTCMP(call->offset, <, count); len = min_t(size_t, len, count - call->offset); if (skb_copy_bits(skb, 0, buf + call->offset, len) < 0 || !pskb_pull(skb, len)) BUG(); call->offset += len; if (call->offset < count) { if (last) { _leave(" = -EBADMSG [%d < %zu]", call->offset, count); return -EBADMSG; } _leave(" = -EAGAIN"); return -EAGAIN; } return 0; }
gpl-2.0
ExPeacer/CAF_android-msm-3.0
sound/core/pcm_timer.c
11677
3755
/* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <linux/gcd.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/timer.h> /* * Timer functions */ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream) { unsigned long rate, mult, fsize, l, post; struct snd_pcm_runtime *runtime = substream->runtime; mult = 1000000000; rate = runtime->rate; if (snd_BUG_ON(!rate)) return; l = gcd(mult, rate); mult /= l; rate /= l; fsize = runtime->period_size; if (snd_BUG_ON(!fsize)) return; l = gcd(rate, fsize); rate /= l; fsize /= l; post = 1; while ((mult * fsize) / fsize != mult) { mult /= 2; post *= 2; } if (rate == 0) { snd_printk(KERN_ERR "pcm timer resolution out of range (rate = %u, period_size = %lu)\n", runtime->rate, runtime->period_size); runtime->timer_resolution = -1; return; } runtime->timer_resolution = (mult * fsize / rate) * post; } static unsigned long snd_pcm_timer_resolution(struct snd_timer * timer) { struct snd_pcm_substream *substream; substream = timer->private_data; return substream->runtime ? 
substream->runtime->timer_resolution : 0; } static int snd_pcm_timer_start(struct snd_timer * timer) { struct snd_pcm_substream *substream; substream = snd_timer_chip(timer); substream->timer_running = 1; return 0; } static int snd_pcm_timer_stop(struct snd_timer * timer) { struct snd_pcm_substream *substream; substream = snd_timer_chip(timer); substream->timer_running = 0; return 0; } static struct snd_timer_hardware snd_pcm_timer = { .flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_SLAVE, .resolution = 0, .ticks = 1, .c_resolution = snd_pcm_timer_resolution, .start = snd_pcm_timer_start, .stop = snd_pcm_timer_stop, }; /* * Init functions */ static void snd_pcm_timer_free(struct snd_timer *timer) { struct snd_pcm_substream *substream = timer->private_data; substream->timer = NULL; } void snd_pcm_timer_init(struct snd_pcm_substream *substream) { struct snd_timer_id tid; struct snd_timer *timer; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.dev_class = SNDRV_TIMER_CLASS_PCM; tid.card = substream->pcm->card->number; tid.device = substream->pcm->device; tid.subdevice = (substream->number << 1) | (substream->stream & 1); if (snd_timer_new(substream->pcm->card, "PCM", &tid, &timer) < 0) return; sprintf(timer->name, "PCM %s %i-%i-%i", substream->stream == SNDRV_PCM_STREAM_CAPTURE ? "capture" : "playback", tid.card, tid.device, tid.subdevice); timer->hw = snd_pcm_timer; if (snd_device_register(timer->card, timer) < 0) { snd_device_free(timer->card, timer); return; } timer->private_data = substream; timer->private_free = snd_pcm_timer_free; substream->timer = timer; } void snd_pcm_timer_done(struct snd_pcm_substream *substream) { if (substream->timer) { snd_device_free(substream->pcm->card, substream->timer); substream->timer = NULL; } }
gpl-2.0
code4rain/Tizen-Odroid-Kernel_3_10
arch/x86/boot/memory.c
12445
3367
/* -*- linux-c -*- ------------------------------------------------------- * * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved * Copyright 2009 Intel Corporation; author H. Peter Anvin * * This file is part of the Linux kernel, and is made available under * the terms of the GNU General Public License version 2. * * ----------------------------------------------------------------------- */ /* * Memory detection code */ #include "boot.h" #define SMAP 0x534d4150 /* ASCII "SMAP" */ static int detect_memory_e820(void) { int count = 0; struct biosregs ireg, oreg; struct e820entry *desc = boot_params.e820_map; static struct e820entry buf; /* static so it is zeroed */ initregs(&ireg); ireg.ax = 0xe820; ireg.cx = sizeof buf; ireg.edx = SMAP; ireg.di = (size_t)&buf; /* * Note: at least one BIOS is known which assumes that the * buffer pointed to by one e820 call is the same one as * the previous call, and only changes modified fields. Therefore, * we use a temporary buffer and copy the results entry by entry. * * This routine deliberately does not try to account for * ACPI 3+ extended attributes. This is because there are * BIOSes in the field which report zero for the valid bit for * all ranges, and we don't currently make any use of the * other attribute bits. Revisit this if we see the extended * attribute bits deployed in a meaningful way in the future. */ do { intcall(0x15, &ireg, &oreg); ireg.ebx = oreg.ebx; /* for next iteration... */ /* BIOSes which terminate the chain with CF = 1 as opposed to %ebx = 0 don't always report the SMAP signature on the final, failing, probe. */ if (oreg.eflags & X86_EFLAGS_CF) break; /* Some BIOSes stop returning SMAP in the middle of the search loop. We don't know exactly how the BIOS screwed up the map at that point, we might have a partial map, the full map, or complete garbage, so just return failure. 
*/ if (oreg.eax != SMAP) { count = 0; break; } *desc++ = buf; count++; } while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_map)); return boot_params.e820_entries = count; } static int detect_memory_e801(void) { struct biosregs ireg, oreg; initregs(&ireg); ireg.ax = 0xe801; intcall(0x15, &ireg, &oreg); if (oreg.eflags & X86_EFLAGS_CF) return -1; /* Do we really need to do this? */ if (oreg.cx || oreg.dx) { oreg.ax = oreg.cx; oreg.bx = oreg.dx; } if (oreg.ax > 15*1024) { return -1; /* Bogus! */ } else if (oreg.ax == 15*1024) { boot_params.alt_mem_k = (oreg.bx << 6) + oreg.ax; } else { /* * This ignores memory above 16MB if we have a memory * hole there. If someone actually finds a machine * with a memory hole at 16MB and no support for * 0E820h they should probably generate a fake e820 * map. */ boot_params.alt_mem_k = oreg.ax; } return 0; } static int detect_memory_88(void) { struct biosregs ireg, oreg; initregs(&ireg); ireg.ah = 0x88; intcall(0x15, &ireg, &oreg); boot_params.screen_info.ext_mem_k = oreg.ax; return -(oreg.eflags & X86_EFLAGS_CF); /* 0 or -1 */ } int detect_memory(void) { int err = -1; if (detect_memory_e820() > 0) err = 0; if (!detect_memory_e801()) err = 0; if (!detect_memory_88()) err = 0; return err; }
gpl-2.0
mkl0301/linux
drivers/block/ps3vram.c
158
22566
/* * ps3vram - Use extra PS3 video ram as MTD block device. * * Copyright 2009 Sony Corporation * * Based on the MTD ps3vram driver, which is * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com> * Added support RSX DMA Vivien Chappelier <vivien.chappelier@free.fr> */ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/cell-regs.h> #include <asm/firmware.h> #include <asm/lv1call.h> #include <asm/ps3.h> #include <asm/ps3gpu.h> #define DEVICE_NAME "ps3vram" #define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */ #define XDR_IOIF 0x0c000000 #define FIFO_BASE XDR_IOIF #define FIFO_SIZE (64 * 1024) #define DMA_PAGE_SIZE (4 * 1024) #define CACHE_PAGE_SIZE (256 * 1024) #define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE) #define CACHE_OFFSET CACHE_PAGE_SIZE #define FIFO_OFFSET 0 #define CTRL_PUT 0x10 #define CTRL_GET 0x11 #define CTRL_TOP 0x15 #define UPLOAD_SUBCH 1 #define DOWNLOAD_SUBCH 2 #define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c #define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 #define CACHE_PAGE_PRESENT 1 #define CACHE_PAGE_DIRTY 2 struct ps3vram_tag { unsigned int address; unsigned int flags; }; struct ps3vram_cache { unsigned int page_count; unsigned int page_size; struct ps3vram_tag *tags; unsigned int hit; unsigned int miss; }; struct ps3vram_priv { struct request_queue *queue; struct gendisk *gendisk; u64 size; u64 memory_handle; u64 context_handle; u32 *ctrl; void *reports; u8 *xdr_buf; u32 *fifo_base; u32 *fifo_ptr; struct ps3vram_cache cache; spinlock_t lock; /* protecting list of bios */ struct bio_list list; }; static int ps3vram_major; static const struct block_device_operations ps3vram_fops = { .owner = THIS_MODULE, }; #define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ #define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ #define 
DMA_NOTIFIER_SIZE 0x40 #define NOTIFIER 7 /* notifier used for completion report */ static char *size = "256M"; module_param(size, charp, 0); MODULE_PARM_DESC(size, "memory size"); static u32 *ps3vram_get_notifier(void *reports, int notifier) { return reports + DMA_NOTIFIER_OFFSET_BASE + DMA_NOTIFIER_SIZE * notifier; } static void ps3vram_notifier_reset(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); int i; for (i = 0; i < 4; i++) notify[i] = 0xffffffff; } static int ps3vram_notifier_wait(struct ps3_system_bus_device *dev, unsigned int timeout_ms) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); unsigned long timeout; for (timeout = 20; timeout; timeout--) { if (!notify[3]) return 0; udelay(10); } timeout = jiffies + msecs_to_jiffies(timeout_ms); do { if (!notify[3]) return 0; msleep(1); } while (time_before(jiffies, timeout)); return -ETIMEDOUT; } static void ps3vram_init_ring(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET; } static int ps3vram_wait_ring(struct ps3_system_bus_device *dev, unsigned int timeout_ms) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); do { if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET]) return 0; msleep(1); } while (time_before(jiffies, timeout)); dev_warn(&dev->core, "FIFO timeout (%08x/%08x/%08x)\n", priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET], priv->ctrl[CTRL_TOP]); return -ETIMEDOUT; } static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data) { *(priv->fifo_ptr)++ = data; } static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, u32 tag, u32 size) { ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag); } 
static void ps3vram_rewind_ring(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); int status; ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET)); priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; /* asking the HV for a blit will kick the FIFO */ status = lv1_gpu_fb_blit(priv->context_handle, 0, 0, 0, 0); if (status) dev_err(&dev->core, "%s: lv1_gpu_fb_blit failed %d\n", __func__, status); priv->fifo_ptr = priv->fifo_base; } static void ps3vram_fire_ring(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); int status; mutex_lock(&ps3_gpu_mutex); priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET + (priv->fifo_ptr - priv->fifo_base) * sizeof(u32); /* asking the HV for a blit will kick the FIFO */ status = lv1_gpu_fb_blit(priv->context_handle, 0, 0, 0, 0); if (status) dev_err(&dev->core, "%s: lv1_gpu_fb_blit failed %d\n", __func__, status); if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) > FIFO_SIZE - 1024) { dev_dbg(&dev->core, "FIFO full, rewinding\n"); ps3vram_wait_ring(dev, 200); ps3vram_rewind_ring(dev); } mutex_unlock(&ps3_gpu_mutex); } static void ps3vram_bind(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1); ps3vram_out_ring(priv, 0x31337303); ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3); ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1); ps3vram_out_ring(priv, 0x3137c0de); ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3); ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ ps3vram_fire_ring(dev); } static int ps3vram_upload(struct 
ps3_system_bus_device *dev, unsigned int src_offset, unsigned int dst_offset, int len, int count) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); ps3vram_begin_ring(priv, UPLOAD_SUBCH, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); ps3vram_out_ring(priv, XDR_IOIF + src_offset); ps3vram_out_ring(priv, dst_offset); ps3vram_out_ring(priv, len); ps3vram_out_ring(priv, len); ps3vram_out_ring(priv, len); ps3vram_out_ring(priv, count); ps3vram_out_ring(priv, (1 << 8) | 1); ps3vram_out_ring(priv, 0); ps3vram_notifier_reset(dev); ps3vram_begin_ring(priv, UPLOAD_SUBCH, NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); ps3vram_out_ring(priv, 0); ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1); ps3vram_out_ring(priv, 0); ps3vram_fire_ring(dev); if (ps3vram_notifier_wait(dev, 200) < 0) { dev_warn(&dev->core, "%s: Notifier timeout\n", __func__); return -1; } return 0; } static int ps3vram_download(struct ps3_system_bus_device *dev, unsigned int src_offset, unsigned int dst_offset, int len, int count) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); ps3vram_out_ring(priv, src_offset); ps3vram_out_ring(priv, XDR_IOIF + dst_offset); ps3vram_out_ring(priv, len); ps3vram_out_ring(priv, len); ps3vram_out_ring(priv, len); ps3vram_out_ring(priv, count); ps3vram_out_ring(priv, (1 << 8) | 1); ps3vram_out_ring(priv, 0); ps3vram_notifier_reset(dev); ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); ps3vram_out_ring(priv, 0); ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1); ps3vram_out_ring(priv, 0); ps3vram_fire_ring(dev); if (ps3vram_notifier_wait(dev, 200) < 0) { dev_warn(&dev->core, "%s: Notifier timeout\n", __func__); return -1; } return 0; } static void ps3vram_cache_evict(struct ps3_system_bus_device *dev, int entry) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); struct ps3vram_cache *cache = &priv->cache; if (!(cache->tags[entry].flags & 
CACHE_PAGE_DIRTY)) return; dev_dbg(&dev->core, "Flushing %d: 0x%08x\n", entry, cache->tags[entry].address); if (ps3vram_upload(dev, CACHE_OFFSET + entry * cache->page_size, cache->tags[entry].address, DMA_PAGE_SIZE, cache->page_size / DMA_PAGE_SIZE) < 0) { dev_err(&dev->core, "Failed to upload from 0x%x to " "0x%x size 0x%x\n", entry * cache->page_size, cache->tags[entry].address, cache->page_size); } cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY; } static void ps3vram_cache_load(struct ps3_system_bus_device *dev, int entry, unsigned int address) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); struct ps3vram_cache *cache = &priv->cache; dev_dbg(&dev->core, "Fetching %d: 0x%08x\n", entry, address); if (ps3vram_download(dev, address, CACHE_OFFSET + entry * cache->page_size, DMA_PAGE_SIZE, cache->page_size / DMA_PAGE_SIZE) < 0) { dev_err(&dev->core, "Failed to download from 0x%x to 0x%x size 0x%x\n", address, entry * cache->page_size, cache->page_size); } cache->tags[entry].address = address; cache->tags[entry].flags |= CACHE_PAGE_PRESENT; } static void ps3vram_cache_flush(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); struct ps3vram_cache *cache = &priv->cache; int i; dev_dbg(&dev->core, "FLUSH\n"); for (i = 0; i < cache->page_count; i++) { ps3vram_cache_evict(dev, i); cache->tags[i].flags = 0; } } static unsigned int ps3vram_cache_match(struct ps3_system_bus_device *dev, loff_t address) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); struct ps3vram_cache *cache = &priv->cache; unsigned int base; unsigned int offset; int i; static int counter; offset = (unsigned int) (address & (cache->page_size - 1)); base = (unsigned int) (address - offset); /* fully associative check */ for (i = 0; i < cache->page_count; i++) { if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) && cache->tags[i].address == base) { cache->hit++; dev_dbg(&dev->core, "Found entry %d: 0x%08x\n", i, cache->tags[i].address); 
return i; } } /* choose a random entry */ i = (jiffies + (counter++)) % cache->page_count; dev_dbg(&dev->core, "Using entry %d\n", i); ps3vram_cache_evict(dev, i); ps3vram_cache_load(dev, i, base); cache->miss++; return i; } static int ps3vram_cache_init(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); priv->cache.page_count = CACHE_PAGE_COUNT; priv->cache.page_size = CACHE_PAGE_SIZE; priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * CACHE_PAGE_COUNT, GFP_KERNEL); if (priv->cache.tags == NULL) { dev_err(&dev->core, "Could not allocate cache tags\n"); return -ENOMEM; } dev_info(&dev->core, "Created ram cache: %d entries, %d KiB each\n", CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024); return 0; } static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); ps3vram_cache_flush(dev); kfree(priv->cache.tags); } static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); unsigned int cached, count; dev_dbg(&dev->core, "%s: from=0x%08x len=0x%zx\n", __func__, (unsigned int)from, len); if (from >= priv->size) return -EIO; if (len > priv->size - from) len = priv->size - from; /* Copy from vram to buf */ count = len; while (count) { unsigned int offset, avail; unsigned int entry; offset = (unsigned int) (from & (priv->cache.page_size - 1)); avail = priv->cache.page_size - offset; entry = ps3vram_cache_match(dev, from); cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; dev_dbg(&dev->core, "%s: from=%08x cached=%08x offset=%08x " "avail=%08x count=%08x\n", __func__, (unsigned int)from, cached, offset, avail, count); if (avail > count) avail = count; memcpy(buf, priv->xdr_buf + cached, avail); buf += avail; count -= avail; from += avail; } *retlen = len; return 0; } static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t 
to, size_t len, size_t *retlen, const u_char *buf) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); unsigned int cached, count; if (to >= priv->size) return -EIO; if (len > priv->size - to) len = priv->size - to; /* Copy from buf to vram */ count = len; while (count) { unsigned int offset, avail; unsigned int entry; offset = (unsigned int) (to & (priv->cache.page_size - 1)); avail = priv->cache.page_size - offset; entry = ps3vram_cache_match(dev, to); cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; dev_dbg(&dev->core, "%s: to=%08x cached=%08x offset=%08x " "avail=%08x count=%08x\n", __func__, (unsigned int)to, cached, offset, avail, count); if (avail > count) avail = count; memcpy(priv->xdr_buf + cached, buf, avail); priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY; buf += avail; count -= avail; to += avail; } *retlen = len; return 0; } static int ps3vram_proc_show(struct seq_file *m, void *v) { struct ps3vram_priv *priv = m->private; seq_printf(m, "hit:%u\nmiss:%u\n", priv->cache.hit, priv->cache.miss); return 0; } static int ps3vram_proc_open(struct inode *inode, struct file *file) { return single_open(file, ps3vram_proc_show, PDE(inode)->data); } static const struct file_operations ps3vram_proc_fops = { .owner = THIS_MODULE, .open = ps3vram_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void ps3vram_proc_init(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); struct proc_dir_entry *pde; pde = proc_create_data(DEVICE_NAME, 0444, NULL, &ps3vram_proc_fops, priv); if (!pde) dev_warn(&dev->core, "failed to create /proc entry\n"); } static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, struct bio *bio) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); int write = bio_data_dir(bio) == WRITE; const char *op = write ? 
"write" : "read"; loff_t offset = bio->bi_sector << 9; int error = 0; struct bio_vec *bvec; unsigned int i; struct bio *next; bio_for_each_segment(bvec, bio, i) { /* PS3 is ppc64, so we don't handle highmem */ char *ptr = page_address(bvec->bv_page) + bvec->bv_offset; size_t len = bvec->bv_len, retlen; dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op, len, offset); if (write) error = ps3vram_write(dev, offset, len, &retlen, ptr); else error = ps3vram_read(dev, offset, len, &retlen, ptr); if (error) { dev_err(&dev->core, "%s failed\n", op); goto out; } if (retlen != len) { dev_err(&dev->core, "Short %s\n", op); error = -EIO; goto out; } offset += len; } dev_dbg(&dev->core, "%s completed\n", op); out: spin_lock_irq(&priv->lock); bio_list_pop(&priv->list); next = bio_list_peek(&priv->list); spin_unlock_irq(&priv->lock); bio_endio(bio, error); return next; } static void ps3vram_make_request(struct request_queue *q, struct bio *bio) { struct ps3_system_bus_device *dev = q->queuedata; struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); int busy; dev_dbg(&dev->core, "%s\n", __func__); spin_lock_irq(&priv->lock); busy = !bio_list_empty(&priv->list); bio_list_add(&priv->list, bio); spin_unlock_irq(&priv->lock); if (busy) return; do { bio = ps3vram_do_bio(dev, bio); } while (bio); } static int ps3vram_probe(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv; int error, status; struct request_queue *queue; struct gendisk *gendisk; u64 ddr_size, ddr_lpar, ctrl_lpar, info_lpar, reports_lpar, reports_size, xdr_lpar; char *rest; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { error = -ENOMEM; goto fail; } spin_lock_init(&priv->lock); bio_list_init(&priv->list); ps3_system_bus_set_drvdata(dev, priv); /* Allocate XDR buffer (1MiB aligned) */ priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL, get_order(XDR_BUF_SIZE)); if (priv->xdr_buf == NULL) { dev_err(&dev->core, "Could not allocate XDR buffer\n"); error = -ENOMEM; goto fail_free_priv; } /* 
Put FIFO at begginning of XDR buffer */ priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET); priv->fifo_ptr = priv->fifo_base; /* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */ if (ps3_open_hv_device(dev)) { dev_err(&dev->core, "ps3_open_hv_device failed\n"); error = -EAGAIN; goto out_free_xdr_buf; } /* Request memory */ status = -1; ddr_size = ALIGN(memparse(size, &rest), 1024*1024); if (!ddr_size) { dev_err(&dev->core, "Specified size is too small\n"); error = -EINVAL; goto out_close_gpu; } while (ddr_size > 0) { status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0, &priv->memory_handle, &ddr_lpar); if (!status) break; ddr_size -= 1024*1024; } if (status) { dev_err(&dev->core, "lv1_gpu_memory_allocate failed %d\n", status); error = -ENOMEM; goto out_close_gpu; } /* Request context */ status = lv1_gpu_context_allocate(priv->memory_handle, 0, &priv->context_handle, &ctrl_lpar, &info_lpar, &reports_lpar, &reports_size); if (status) { dev_err(&dev->core, "lv1_gpu_context_allocate failed %d\n", status); error = -ENOMEM; goto out_free_memory; } /* Map XDR buffer to RSX */ xdr_lpar = ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)); status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, xdr_lpar, XDR_BUF_SIZE, CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M); if (status) { dev_err(&dev->core, "lv1_gpu_context_iomap failed %d\n", status); error = -ENOMEM; goto out_free_context; } priv->ctrl = ioremap(ctrl_lpar, 64 * 1024); if (!priv->ctrl) { dev_err(&dev->core, "ioremap CTRL failed\n"); error = -ENOMEM; goto out_unmap_context; } priv->reports = ioremap(reports_lpar, reports_size); if (!priv->reports) { dev_err(&dev->core, "ioremap REPORTS failed\n"); error = -ENOMEM; goto out_unmap_ctrl; } mutex_lock(&ps3_gpu_mutex); ps3vram_init_ring(dev); mutex_unlock(&ps3_gpu_mutex); priv->size = ddr_size; ps3vram_bind(dev); mutex_lock(&ps3_gpu_mutex); error = ps3vram_wait_ring(dev, 100); mutex_unlock(&ps3_gpu_mutex); if (error < 0) { dev_err(&dev->core, "Failed 
to initialize channels\n"); error = -ETIMEDOUT; goto out_unmap_reports; } ps3vram_cache_init(dev); ps3vram_proc_init(dev); queue = blk_alloc_queue(GFP_KERNEL); if (!queue) { dev_err(&dev->core, "blk_alloc_queue failed\n"); error = -ENOMEM; goto out_cache_cleanup; } priv->queue = queue; queue->queuedata = dev; blk_queue_make_request(queue, ps3vram_make_request); blk_queue_max_segments(queue, BLK_MAX_SEGMENTS); blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE); blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS); gendisk = alloc_disk(1); if (!gendisk) { dev_err(&dev->core, "alloc_disk failed\n"); error = -ENOMEM; goto fail_cleanup_queue; } priv->gendisk = gendisk; gendisk->major = ps3vram_major; gendisk->first_minor = 0; gendisk->fops = &ps3vram_fops; gendisk->queue = queue; gendisk->private_data = dev; gendisk->driverfs_dev = &dev->core; strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name)); set_capacity(gendisk, priv->size >> 9); dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n", gendisk->disk_name, get_capacity(gendisk) >> 11); add_disk(gendisk); return 0; fail_cleanup_queue: blk_cleanup_queue(queue); out_cache_cleanup: remove_proc_entry(DEVICE_NAME, NULL); ps3vram_cache_cleanup(dev); out_unmap_reports: iounmap(priv->reports); out_unmap_ctrl: iounmap(priv->ctrl); out_unmap_context: lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, xdr_lpar, XDR_BUF_SIZE, CBE_IOPTE_M); out_free_context: lv1_gpu_context_free(priv->context_handle); out_free_memory: lv1_gpu_memory_free(priv->memory_handle); out_close_gpu: ps3_close_hv_device(dev); out_free_xdr_buf: free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); fail_free_priv: kfree(priv); ps3_system_bus_set_drvdata(dev, NULL); fail: return error; } static int ps3vram_remove(struct ps3_system_bus_device *dev) { struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); del_gendisk(priv->gendisk); put_disk(priv->gendisk); blk_cleanup_queue(priv->queue); 
remove_proc_entry(DEVICE_NAME, NULL); ps3vram_cache_cleanup(dev); iounmap(priv->reports); iounmap(priv->ctrl); lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)), XDR_BUF_SIZE, CBE_IOPTE_M); lv1_gpu_context_free(priv->context_handle); lv1_gpu_memory_free(priv->memory_handle); ps3_close_hv_device(dev); free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); kfree(priv); ps3_system_bus_set_drvdata(dev, NULL); return 0; } static struct ps3_system_bus_driver ps3vram = { .match_id = PS3_MATCH_ID_GPU, .match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK, .core.name = DEVICE_NAME, .core.owner = THIS_MODULE, .probe = ps3vram_probe, .remove = ps3vram_remove, .shutdown = ps3vram_remove, }; static int __init ps3vram_init(void) { int error; if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) return -ENODEV; error = register_blkdev(0, DEVICE_NAME); if (error <= 0) { pr_err("%s: register_blkdev failed %d\n", DEVICE_NAME, error); return error; } ps3vram_major = error; pr_info("%s: registered block device major %d\n", DEVICE_NAME, ps3vram_major); error = ps3_system_bus_driver_register(&ps3vram); if (error) unregister_blkdev(ps3vram_major, DEVICE_NAME); return error; } static void __exit ps3vram_exit(void) { ps3_system_bus_driver_unregister(&ps3vram); unregister_blkdev(ps3vram_major, DEVICE_NAME); } module_init(ps3vram_init); module_exit(ps3vram_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PS3 Video RAM Storage Driver"); MODULE_AUTHOR("Sony Corporation"); MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK);
gpl-2.0
Chadder43/Asus-T200TA-Linux
arch/mips/bcm47xx/sprom.c
414
30719
/* * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org> * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org> * Copyright (C) 2006 Michael Buesch <m@bues.ch> * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org> * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <bcm47xx.h> #include <bcm47xx_nvram.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> static void create_key(const char *prefix, const char *postfix, const char *name, char *buf, int len) { if (prefix && postfix) snprintf(buf, len, "%s%s%s", prefix, name, postfix); else if (prefix) snprintf(buf, len, "%s%s", prefix, name); else if (postfix) snprintf(buf, len, "%s%s", name, postfix); else snprintf(buf, len, "%s", name); } static int get_nvram_var(const char *prefix, const char *postfix, const char *name, char *buf, int len, bool fallback) { char key[40]; int err; create_key(prefix, postfix, name, key, sizeof(key)); err = bcm47xx_nvram_getenv(key, buf, len); if (fallback && err == -ENOENT && prefix) { create_key(NULL, postfix, name, key, sizeof(key)); err = bcm47xx_nvram_getenv(key, buf, len); } return err; } #define NVRAM_READ_VAL(type) \ static void nvram_read_ ## type (const char *prefix, \ const char *postfix, const char *name, \ type *val, type allset, bool fallback) \ { \ char buf[100]; \ int err; \ type var; \ \ err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf), \ fallback); \ if (err < 0) \ return; \ err = kstrto ## type(strim(buf), 0, &var); \ if (err) { \ pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \ prefix, name, postfix, buf, err); \ return; \ } \ if (allset && var == allset) \ return; \ *val = var; \ } NVRAM_READ_VAL(u8) NVRAM_READ_VAL(s8) NVRAM_READ_VAL(u16) NVRAM_READ_VAL(u32) #undef NVRAM_READ_VAL static void nvram_read_u32_2(const char *prefix, const char *name, u16 *val_lo, u16 *val_hi, bool fallback) { char buf[100]; int err; u32 val; err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); if (err < 0) return; err = kstrtou32(strim(buf), 0, &val); if (err) { pr_warn("can not parse nvram name %s%s with value %s got %i\n", prefix, name, buf, err); return; } *val_lo = (val & 0x0000FFFFU); *val_hi = (val & 0xFFFF0000U) >> 16; } static void nvram_read_leddc(const char *prefix, const 
char *name, u8 *leddc_on_time, u8 *leddc_off_time, bool fallback) { char buf[100]; int err; u32 val; err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); if (err < 0) return; err = kstrtou32(strim(buf), 0, &val); if (err) { pr_warn("can not parse nvram name %s%s with value %s got %i\n", prefix, name, buf, err); return; } if (val == 0xffff || val == 0xffffffff) return; *leddc_on_time = val & 0xff; *leddc_off_time = (val >> 16) & 0xff; } static void nvram_read_macaddr(const char *prefix, const char *name, u8 val[6], bool fallback) { char buf[100]; int err; err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); if (err < 0) return; bcm47xx_nvram_parse_macaddr(buf, val); } static void nvram_read_alpha2(const char *prefix, const char *name, char val[2], bool fallback) { char buf[10]; int err; err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback); if (err < 0) return; if (buf[0] == '0') return; if (strlen(buf) > 2) { pr_warn("alpha2 is too long %s\n", buf); return; } memcpy(val, buf, 2); } static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u16(prefix, NULL, "devid", &sprom->dev_id, 0, fallback); nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback); nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback); nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback); nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff, fallback); nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0, fallback); nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0, fallback); nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0, fallback); nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0, fallback); nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback); } static void bcm47xx_fill_sprom_r12389(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u16(prefix, NULL, "pa0b0", 
&sprom->pa0b0, 0, fallback); nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0, fallback); nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0, fallback); nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0, fallback); nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0, fallback); nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0, fallback); nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0, fallback); nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0, fallback); nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0, fallback); nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0, fallback); } static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0, fallback); nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0, fallback); } static void bcm47xx_fill_sprom_r2389(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0, fallback); nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0, fallback); nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0, fallback); nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0, fallback); nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0, fallback); nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0, fallback); nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0, fallback); nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0, fallback); nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0, fallback); } static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0, fallback); nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0, fallback); nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0, fallback); nvram_read_u8(prefix, NULL, "rssismf2g", 
&sprom->rssismf2g, 0, fallback); nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0, fallback); nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0, fallback); nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0, fallback); nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0, fallback); nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0, fallback); nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0, fallback); nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0, fallback); nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0, fallback); nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0, fallback); nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0, fallback); } static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback); nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time, &sprom->leddc_off_time, fallback); } static void bcm47xx_fill_sprom_r4589(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback); nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0, fallback); nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0, fallback); nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf, fallback); nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf, fallback); nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff, fallback); nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time, &sprom->leddc_off_time, fallback); } static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0, fallback); nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0, fallback); nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0, fallback); nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0, 
fallback); nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0, fallback); nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0, fallback); nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0, fallback); nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0, fallback); nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0, fallback); nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0, fallback); nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0, fallback); nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0, fallback); nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0, fallback); nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0, fallback); nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0, fallback); nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0, fallback); nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5glpo5", 
&sprom->mcs5glpo[5], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0, fallback); nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0, fallback); } static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0, fallback); nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0, fallback); nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0, fallback); nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0, fallback); nvram_read_u8(prefix, NULL, 
"txpid5gha2", &sprom->txpid5gh[2], 0, fallback); nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0, fallback); } static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0, fallback); nvram_read_u8(prefix, NULL, "extpagain2g", &sprom->fem.ghz2.extpa_gain, 0, fallback); nvram_read_u8(prefix, NULL, "pdetrange2g", &sprom->fem.ghz2.pdet_range, 0, fallback); nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0, fallback); nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0, fallback); nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0, fallback); nvram_read_u8(prefix, NULL, "extpagain5g", &sprom->fem.ghz5.extpa_gain, 0, fallback); nvram_read_u8(prefix, NULL, "pdetrange5g", &sprom->fem.ghz5.pdet_range, 0, fallback); nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0, fallback); nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0, fallback); nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0, fallback); nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0, fallback); nvram_read_u16(prefix, NULL, "rawtempsense", &sprom->rawtempsense, 0, fallback); nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0, fallback); nvram_read_u8(prefix, NULL, "tempsense_slope", &sprom->tempsense_slope, 0, fallback); nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0, fallback); nvram_read_u8(prefix, NULL, "tempsense_option", &sprom->tempsense_option, 0, fallback); nvram_read_u8(prefix, NULL, "freqoffset_corr", &sprom->freqoffset_corr, 0, fallback); nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0, fallback); nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0, fallback); nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0, fallback); nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0, fallback); 
nvram_read_u8(prefix, NULL, "phycal_tempdelta", &sprom->phycal_tempdelta, 0, fallback); nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0, fallback); nvram_read_u8(prefix, NULL, "temps_hysteresis", &sprom->temps_hysteresis, 0, fallback); nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0, fallback); nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr2ga0", &sprom->rxgainerr2ga[0], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr2ga1", &sprom->rxgainerr2ga[1], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr2ga2", &sprom->rxgainerr2ga[2], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gla0", &sprom->rxgainerr5gla[0], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gla1", &sprom->rxgainerr5gla[1], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gla2", &sprom->rxgainerr5gla[2], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gma0", &sprom->rxgainerr5gma[0], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gma1", &sprom->rxgainerr5gma[1], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gma2", &sprom->rxgainerr5gma[2], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gha0", &sprom->rxgainerr5gha[0], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gha1", &sprom->rxgainerr5gha[1], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gha2", &sprom->rxgainerr5gha[2], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gua0", &sprom->rxgainerr5gua[0], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gua1", &sprom->rxgainerr5gua[1], 0, fallback); nvram_read_u8(prefix, NULL, "rxgainerr5gua2", &sprom->rxgainerr5gua[2], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gla0", 
&sprom->noiselvl5gla[0], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gla1", &sprom->noiselvl5gla[1], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gla2", &sprom->noiselvl5gla[2], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gma0", &sprom->noiselvl5gma[0], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gma1", &sprom->noiselvl5gma[1], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gma2", &sprom->noiselvl5gma[2], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gha0", &sprom->noiselvl5gha[0], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gha1", &sprom->noiselvl5gha[1], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gha2", &sprom->noiselvl5gha[2], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gua0", &sprom->noiselvl5gua[0], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gua1", &sprom->noiselvl5gua[1], 0, fallback); nvram_read_u8(prefix, NULL, "noiselvl5gua2", &sprom->noiselvl5gua[2], 0, fallback); nvram_read_u8(prefix, NULL, "pcieingress_war", &sprom->pcieingress_war, 0, fallback); } static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0, fallback); nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0, fallback); nvram_read_u32(prefix, NULL, "legofdmbw202gpo", &sprom->legofdmbw202gpo, 0, fallback); nvram_read_u32(prefix, NULL, "legofdmbw20ul2gpo", &sprom->legofdmbw20ul2gpo, 0, fallback); nvram_read_u32(prefix, NULL, "legofdmbw205glpo", &sprom->legofdmbw205glpo, 0, fallback); nvram_read_u32(prefix, NULL, "legofdmbw20ul5glpo", &sprom->legofdmbw20ul5glpo, 0, fallback); nvram_read_u32(prefix, NULL, "legofdmbw205gmpo", &sprom->legofdmbw205gmpo, 0, fallback); nvram_read_u32(prefix, NULL, "legofdmbw20ul5gmpo", &sprom->legofdmbw20ul5gmpo, 0, fallback); nvram_read_u32(prefix, NULL, "legofdmbw205ghpo", &sprom->legofdmbw205ghpo, 0, fallback); nvram_read_u32(prefix, NULL, "legofdmbw20ul5ghpo", 
&sprom->legofdmbw20ul5ghpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw20ul5glpo", &sprom->mcsbw20ul5glpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw20ul5gmpo", &sprom->mcsbw20ul5gmpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw20ul5ghpo", &sprom->mcsbw20ul5ghpo, 0, fallback); nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0, fallback); nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0, fallback); nvram_read_u16(prefix, NULL, "legofdm40duppo", &sprom->legofdm40duppo, 0, fallback); nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0, fallback); nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0, fallback); } static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom, const char *prefix, bool fallback) { char postfix[2]; int i; for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) { struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i]; snprintf(postfix, sizeof(postfix), "%i", i); nvram_read_u8(prefix, postfix, "maxp2ga", &pwr_info->maxpwr_2g, 0, fallback); nvram_read_u8(prefix, postfix, "itt2ga", &pwr_info->itssi_2g, 0, fallback); nvram_read_u8(prefix, postfix, "itt5ga", &pwr_info->itssi_5g, 0, fallback); nvram_read_u16(prefix, postfix, "pa2gw0a", &pwr_info->pa_2g[0], 0, fallback); nvram_read_u16(prefix, postfix, "pa2gw1a", &pwr_info->pa_2g[1], 0, fallback); 
nvram_read_u16(prefix, postfix, "pa2gw2a", &pwr_info->pa_2g[2], 0, fallback); nvram_read_u8(prefix, postfix, "maxp5ga", &pwr_info->maxpwr_5g, 0, fallback); nvram_read_u8(prefix, postfix, "maxp5gha", &pwr_info->maxpwr_5gh, 0, fallback); nvram_read_u8(prefix, postfix, "maxp5gla", &pwr_info->maxpwr_5gl, 0, fallback); nvram_read_u16(prefix, postfix, "pa5gw0a", &pwr_info->pa_5g[0], 0, fallback); nvram_read_u16(prefix, postfix, "pa5gw1a", &pwr_info->pa_5g[1], 0, fallback); nvram_read_u16(prefix, postfix, "pa5gw2a", &pwr_info->pa_5g[2], 0, fallback); nvram_read_u16(prefix, postfix, "pa5glw0a", &pwr_info->pa_5gl[0], 0, fallback); nvram_read_u16(prefix, postfix, "pa5glw1a", &pwr_info->pa_5gl[1], 0, fallback); nvram_read_u16(prefix, postfix, "pa5glw2a", &pwr_info->pa_5gl[2], 0, fallback); nvram_read_u16(prefix, postfix, "pa5ghw0a", &pwr_info->pa_5gh[0], 0, fallback); nvram_read_u16(prefix, postfix, "pa5ghw1a", &pwr_info->pa_5gh[1], 0, fallback); nvram_read_u16(prefix, postfix, "pa5ghw2a", &pwr_info->pa_5gh[2], 0, fallback); } } static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom, const char *prefix, bool fallback) { char postfix[2]; int i; for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) { struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i]; snprintf(postfix, sizeof(postfix), "%i", i); nvram_read_u16(prefix, postfix, "pa2gw3a", &pwr_info->pa_2g[3], 0, fallback); nvram_read_u16(prefix, postfix, "pa5gw3a", &pwr_info->pa_5g[3], 0, fallback); nvram_read_u16(prefix, postfix, "pa5glw3a", &pwr_info->pa_5gl[3], 0, fallback); nvram_read_u16(prefix, postfix, "pa5ghw3a", &pwr_info->pa_5gh[3], 0, fallback); } } static bool bcm47xx_is_valid_mac(u8 *mac) { return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c); } static int bcm47xx_increase_mac_addr(u8 *mac, u8 num) { u8 *oui = mac + ETH_ALEN/2 - 1; u8 *p = mac + ETH_ALEN - 1; do { (*p) += num; if (*p > num) break; p--; num = 1; } while (p != oui); if (p == oui) { pr_err("unable to fetch mac 
address\n"); return -ENOENT; } return 0; } static int mac_addr_used = 2; static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback); nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0, fallback); nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0, fallback); nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback); nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0, fallback); nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0, fallback); nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback); nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback); /* The address prefix 00:90:4C is used by Broadcom in their initial configuration. When a mac address with the prefix 00:90:4C is used all devices from the same series are sharing the same mac address. To prevent mac address collisions we replace them with a mac address based on the base address. 
*/ if (!bcm47xx_is_valid_mac(sprom->il0mac)) { u8 mac[6]; nvram_read_macaddr(NULL, "et0macaddr", mac, false); if (bcm47xx_is_valid_mac(mac)) { int err = bcm47xx_increase_mac_addr(mac, mac_addr_used); if (!err) { ether_addr_copy(sprom->il0mac, mac); mac_addr_used++; } } } } static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix, bool fallback) { nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0, true); nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0, fallback); nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0, true); nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo, &sprom->boardflags_hi, fallback); nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo, &sprom->boardflags2_hi, fallback); } void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix, bool fallback) { bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback); bcm47xx_fill_board_data(sprom, prefix, fallback); nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback); switch (sprom->revision) { case 1: bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); bcm47xx_fill_sprom_r1(sprom, prefix, fallback); break; case 2: bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); bcm47xx_fill_sprom_r2389(sprom, prefix, fallback); break; case 3: bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); bcm47xx_fill_sprom_r2389(sprom, prefix, fallback); bcm47xx_fill_sprom_r389(sprom, prefix, fallback); bcm47xx_fill_sprom_r3(sprom, prefix, fallback); break; case 4: case 5: bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); bcm47xx_fill_sprom_r4589(sprom, prefix, fallback); bcm47xx_fill_sprom_r458(sprom, prefix, fallback); bcm47xx_fill_sprom_r45(sprom, prefix, fallback); bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback); bcm47xx_fill_sprom_path_r45(sprom, 
prefix, fallback); break; case 8: bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); bcm47xx_fill_sprom_r2389(sprom, prefix, fallback); bcm47xx_fill_sprom_r389(sprom, prefix, fallback); bcm47xx_fill_sprom_r4589(sprom, prefix, fallback); bcm47xx_fill_sprom_r458(sprom, prefix, fallback); bcm47xx_fill_sprom_r89(sprom, prefix, fallback); bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback); break; case 9: bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); bcm47xx_fill_sprom_r2389(sprom, prefix, fallback); bcm47xx_fill_sprom_r389(sprom, prefix, fallback); bcm47xx_fill_sprom_r4589(sprom, prefix, fallback); bcm47xx_fill_sprom_r89(sprom, prefix, fallback); bcm47xx_fill_sprom_r9(sprom, prefix, fallback); bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback); break; default: pr_warn("Unsupported SPROM revision %d detected. Will extract" " v1\n", sprom->revision); sprom->revision = 1; bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback); bcm47xx_fill_sprom_r12389(sprom, prefix, fallback); bcm47xx_fill_sprom_r1(sprom, prefix, fallback); } } #ifdef CONFIG_BCM47XX_SSB void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo, const char *prefix) { nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0, true); if (!boardinfo->vendor) boardinfo->vendor = SSB_BOARDVENDOR_BCM; nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true); } #endif #ifdef CONFIG_BCM47XX_BCMA void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo, const char *prefix) { nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0, true); if (!boardinfo->vendor) boardinfo->vendor = SSB_BOARDVENDOR_BCM; nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true); } #endif
gpl-2.0
shesselba/linux-dove
drivers/rtc/rtc-ds1742.c
414
7261
/* * An rtc driver for the Dallas DS1742 * * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Copyright (C) 2006 Torsten Ertbjerg Rasmussen <tr@newtec.dk> * - nvram size determined from resource * - this ds1742 driver now supports ds1743. */ #include <linux/bcd.h> #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/rtc.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #define DRV_VERSION "0.4" #define RTC_SIZE 8 #define RTC_CONTROL 0 #define RTC_CENTURY 0 #define RTC_SECONDS 1 #define RTC_MINUTES 2 #define RTC_HOURS 3 #define RTC_DAY 4 #define RTC_DATE 5 #define RTC_MONTH 6 #define RTC_YEAR 7 #define RTC_CENTURY_MASK 0x3f #define RTC_SECONDS_MASK 0x7f #define RTC_DAY_MASK 0x07 /* Bits in the Control/Century register */ #define RTC_WRITE 0x80 #define RTC_READ 0x40 /* Bits in the Seconds register */ #define RTC_STOP 0x80 /* Bits in the Day register */ #define RTC_BATT_FLAG 0x80 struct rtc_plat_data { void __iomem *ioaddr_nvram; void __iomem *ioaddr_rtc; size_t size_nvram; unsigned long last_jiffies; struct bin_attribute nvram_attr; }; static int ds1742_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr_rtc; u8 century; century = bin2bcd((tm->tm_year + 1900) / 100); writeb(RTC_WRITE, ioaddr + RTC_CONTROL); writeb(bin2bcd(tm->tm_year % 100), ioaddr + RTC_YEAR); writeb(bin2bcd(tm->tm_mon + 1), ioaddr + RTC_MONTH); writeb(bin2bcd(tm->tm_wday) & RTC_DAY_MASK, ioaddr + RTC_DAY); writeb(bin2bcd(tm->tm_mday), ioaddr + RTC_DATE); writeb(bin2bcd(tm->tm_hour), ioaddr + RTC_HOURS); 
writeb(bin2bcd(tm->tm_min), ioaddr + RTC_MINUTES); writeb(bin2bcd(tm->tm_sec) & RTC_SECONDS_MASK, ioaddr + RTC_SECONDS); /* RTC_CENTURY and RTC_CONTROL share same register */ writeb(RTC_WRITE | (century & RTC_CENTURY_MASK), ioaddr + RTC_CENTURY); writeb(century & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL); return 0; } static int ds1742_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr_rtc; unsigned int year, month, day, hour, minute, second, week; unsigned int century; /* give enough time to update RTC in case of continuous read */ if (pdata->last_jiffies == jiffies) msleep(1); pdata->last_jiffies = jiffies; writeb(RTC_READ, ioaddr + RTC_CONTROL); second = readb(ioaddr + RTC_SECONDS) & RTC_SECONDS_MASK; minute = readb(ioaddr + RTC_MINUTES); hour = readb(ioaddr + RTC_HOURS); day = readb(ioaddr + RTC_DATE); week = readb(ioaddr + RTC_DAY) & RTC_DAY_MASK; month = readb(ioaddr + RTC_MONTH); year = readb(ioaddr + RTC_YEAR); century = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK; writeb(0, ioaddr + RTC_CONTROL); tm->tm_sec = bcd2bin(second); tm->tm_min = bcd2bin(minute); tm->tm_hour = bcd2bin(hour); tm->tm_mday = bcd2bin(day); tm->tm_wday = bcd2bin(week); tm->tm_mon = bcd2bin(month) - 1; /* year is 1900 + tm->tm_year */ tm->tm_year = bcd2bin(year) + bcd2bin(century) * 100 - 1900; return rtc_valid_tm(tm); } static const struct rtc_class_ops ds1742_rtc_ops = { .read_time = ds1742_rtc_read_time, .set_time = ds1742_rtc_set_time, }; static ssize_t ds1742_nvram_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr_nvram; ssize_t count; for (count = 0; 
size > 0 && pos < pdata->size_nvram; count++, size--) *buf++ = readb(ioaddr + pos++); return count; } static ssize_t ds1742_nvram_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t size) { struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev); struct rtc_plat_data *pdata = platform_get_drvdata(pdev); void __iomem *ioaddr = pdata->ioaddr_nvram; ssize_t count; for (count = 0; size > 0 && pos < pdata->size_nvram; count++, size--) writeb(*buf++, ioaddr + pos++); return count; } static int ds1742_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; struct resource *res; unsigned int cen, sec; struct rtc_plat_data *pdata; void __iomem *ioaddr; int ret = 0; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ioaddr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ioaddr)) return PTR_ERR(ioaddr); pdata->ioaddr_nvram = ioaddr; pdata->size_nvram = resource_size(res) - RTC_SIZE; pdata->ioaddr_rtc = ioaddr + pdata->size_nvram; sysfs_bin_attr_init(&pdata->nvram_attr); pdata->nvram_attr.attr.name = "nvram"; pdata->nvram_attr.attr.mode = S_IRUGO | S_IWUSR; pdata->nvram_attr.read = ds1742_nvram_read; pdata->nvram_attr.write = ds1742_nvram_write; pdata->nvram_attr.size = pdata->size_nvram; /* turn RTC on if it was not on */ ioaddr = pdata->ioaddr_rtc; sec = readb(ioaddr + RTC_SECONDS); if (sec & RTC_STOP) { sec &= RTC_SECONDS_MASK; cen = readb(ioaddr + RTC_CENTURY) & RTC_CENTURY_MASK; writeb(RTC_WRITE, ioaddr + RTC_CONTROL); writeb(sec, ioaddr + RTC_SECONDS); writeb(cen & RTC_CENTURY_MASK, ioaddr + RTC_CONTROL); } if (!(readb(ioaddr + RTC_DAY) & RTC_BATT_FLAG)) dev_warn(&pdev->dev, "voltage-low detected.\n"); pdata->last_jiffies = jiffies; platform_set_drvdata(pdev, pdata); rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &ds1742_rtc_ops, 
THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); ret = sysfs_create_bin_file(&pdev->dev.kobj, &pdata->nvram_attr); if (ret) dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n", pdata->nvram_attr.attr.name); return 0; } static int ds1742_rtc_remove(struct platform_device *pdev) { struct rtc_plat_data *pdata = platform_get_drvdata(pdev); sysfs_remove_bin_file(&pdev->dev.kobj, &pdata->nvram_attr); return 0; } static const struct of_device_id __maybe_unused ds1742_rtc_of_match[] = { { .compatible = "maxim,ds1742", }, { } }; MODULE_DEVICE_TABLE(of, ds1742_rtc_of_match); static struct platform_driver ds1742_rtc_driver = { .probe = ds1742_rtc_probe, .remove = ds1742_rtc_remove, .driver = { .name = "rtc-ds1742", .owner = THIS_MODULE, .of_match_table = of_match_ptr(ds1742_rtc_of_match), }, }; module_platform_driver(ds1742_rtc_driver); MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); MODULE_DESCRIPTION("Dallas DS1742 RTC driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_ALIAS("platform:rtc-ds1742");
gpl-2.0
tvall43/android_kernel_grouper
arch/mips/kernel/irq.c
414
3395
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Code to handle x86 style IRQs plus some generic interrupt stuff. * * Copyright (C) 1992 Linus Torvalds * Copyright (C) 1994 - 2000 Ralf Baechle */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/mm.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/kallsyms.h> #include <linux/kgdb.h> #include <linux/ftrace.h> #include <linux/atomic.h> #include <asm/system.h> #include <asm/uaccess.h> #ifdef CONFIG_KGDB int kgdb_early_setup; #endif static unsigned long irq_map[NR_IRQS / BITS_PER_LONG]; int allocate_irqno(void) { int irq; again: irq = find_first_zero_bit(irq_map, NR_IRQS); if (irq >= NR_IRQS) return -ENOSPC; if (test_and_set_bit(irq, irq_map)) goto again; return irq; } /* * Allocate the 16 legacy interrupts for i8259 devices. This happens early * in the kernel initialization so treating allocation failure as BUG() is * ok. */ void __init alloc_legacy_irqno(void) { int i; for (i = 0; i <= 16; i++) BUG_ON(test_and_set_bit(i, irq_map)); } void free_irqno(unsigned int irq) { smp_mb__before_clear_bit(); clear_bit(irq, irq_map); smp_mb__after_clear_bit(); } /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. 
*/ void ack_bad_irq(unsigned int irq) { smtc_im_ack_irq(irq); printk("unexpected IRQ # %d\n", irq); } atomic_t irq_err_count; int arch_show_interrupts(struct seq_file *p, int prec) { seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); return 0; } asmlinkage void spurious_interrupt(void) { atomic_inc(&irq_err_count); } void __init init_IRQ(void) { int i; #ifdef CONFIG_KGDB if (kgdb_early_setup) return; #endif for (i = 0; i < NR_IRQS; i++) irq_set_noprobe(i); arch_init_irq(); #ifdef CONFIG_KGDB if (!kgdb_early_setup) kgdb_early_setup = 1; #endif } #ifdef DEBUG_STACKOVERFLOW static inline void check_stack_overflow(void) { unsigned long sp; __asm__ __volatile__("move %0, $sp" : "=r" (sp)); sp &= THREAD_MASK; /* * Check for stack overflow: is there less than STACK_WARN free? * STACK_WARN is defined as 1/8 of THREAD_SIZE by default. */ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { printk("do_IRQ: stack overflow: %ld\n", sp - sizeof(struct thread_info)); dump_stack(); } } #else static inline void check_stack_overflow(void) {} #endif /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific * handlers). */ void __irq_entry do_IRQ(unsigned int irq) { irq_enter(); check_stack_overflow(); if (!smtc_handle_on_other_cpu(irq)) generic_handle_irq(irq); irq_exit(); } #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF /* * To avoid inefficient and in some cases pathological re-checking of * IRQ affinity, we have this variant that skips the affinity check. */ void __irq_entry do_IRQ_no_affinity(unsigned int irq) { irq_enter(); smtc_im_backstop(irq); generic_handle_irq(irq); irq_exit(); } #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
gpl-2.0
leemchaehoon/linux_m
drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
414
7468
/* * Copyright (C) 2009 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <drm/drmP.h> #include "nouveau_drm.h" #include "nouveau_reg.h" #include "nouveau_encoder.h" #include "nouveau_connector.h" #include "nouveau_crtc.h" #include "hw.h" #include <drm/drm_crtc_helper.h> #include <drm/i2c/ch7006.h> static struct nvkm_i2c_board_info nv04_tv_encoder_info[] = { { { I2C_BOARD_INFO("ch7006", 0x75), .platform_data = &(struct ch7006_encoder_params) { CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER, 0, 0, 0, CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED, CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC } }, 0 }, { } }; int nv04_tv_identify(struct drm_device *dev, int i2c_index) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); return i2c->identify(i2c, i2c_index, "TV encoder", nv04_tv_encoder_info, NULL, NULL); } #define PLLSEL_TV_CRTC1_MASK \ (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1) #define PLLSEL_TV_CRTC2_MASK \ (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2) static void nv04_tv_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; uint8_t crtc1A; NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n", mode, nv_encoder->dcb->index); state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK); if (mode == DRM_MODE_DPMS_ON) { int head = nouveau_crtc(encoder->crtc)->index; crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX); state->pllsel |= head ? 
PLLSEL_TV_CRTC2_MASK : PLLSEL_TV_CRTC1_MASK; /* Inhibit hsync */ crtc1A |= 0x80; NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A); } NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); get_slave_funcs(encoder)->dpms(encoder, mode); } static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) { struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head]; state->tv_setup = 0; if (bind) state->CRTC[NV_CIO_CRE_49] |= 0x10; else state->CRTC[NV_CIO_CRE_49] &= ~0x10; NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, state->CRTC[NV_CIO_CRE_LCD__INDEX]); NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49, state->CRTC[NV_CIO_CRE_49]); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, state->tv_setup); } static void nv04_tv_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; int head = nouveau_crtc(encoder->crtc)->index; const struct drm_encoder_helper_funcs *helper = encoder->helper_private; helper->dpms(encoder, DRM_MODE_DPMS_OFF); nv04_dfp_disable(dev, head); if (nv_two_heads(dev)) nv04_tv_bind(dev, head ^ 1, false); nv04_tv_bind(dev, head, true); } static void nv04_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; regp->tv_htotal = adjusted_mode->htotal; regp->tv_vtotal = adjusted_mode->vtotal; /* These delay the TV signals with respect to the VGA port, * they might be useful if we ever allow a CRTC to drive * multiple outputs. 
*/ regp->tv_hskew = 1; regp->tv_hsync_delay = 1; regp->tv_hsync_delay2 = 64; regp->tv_vskew = 1; regp->tv_vsync_delay = 1; get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode); } static void nv04_tv_commit(struct drm_encoder *encoder) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); const struct drm_encoder_helper_funcs *helper = encoder->helper_private; helper->dpms(encoder, DRM_MODE_DPMS_ON); NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n", nouveau_encoder_connector_get(nv_encoder)->base.name, nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } static void nv04_tv_destroy(struct drm_encoder *encoder) { get_slave_funcs(encoder)->destroy(encoder); drm_encoder_cleanup(encoder); kfree(encoder->helper_private); kfree(nouveau_encoder(encoder)); } static const struct drm_encoder_funcs nv04_tv_funcs = { .destroy = nv04_tv_destroy, }; static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = { .dpms = nv04_tv_dpms, .save = drm_i2c_encoder_save, .restore = drm_i2c_encoder_restore, .mode_fixup = drm_i2c_encoder_mode_fixup, .prepare = nv04_tv_prepare, .commit = nv04_tv_commit, .mode_set = nv04_tv_mode_set, .detect = drm_i2c_encoder_detect, }; int nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) { struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; struct drm_device *dev = connector->dev; struct nouveau_drm *drm = nouveau_drm(dev); struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); struct nvkm_i2c_port *port = i2c->find(i2c, entry->i2c_index); int type, ret; /* Ensure that we can talk to this encoder */ type = nv04_tv_identify(dev, entry->i2c_index); if (type < 0) return type; /* Allocate the necessary memory */ nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); if (!nv_encoder) return -ENOMEM; /* Initialize the common members */ encoder = 
to_drm_encoder(nv_encoder); drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs); encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; nv_encoder->dcb = entry; nv_encoder->or = ffs(entry->or) - 1; /* Run the slave-specific initialization */ ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), &port->adapter, &nv04_tv_encoder_info[type].dev); if (ret < 0) goto fail_cleanup; /* Attach it to the specified connector. */ get_slave_funcs(encoder)->create_resources(encoder, connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; fail_cleanup: drm_encoder_cleanup(encoder); kfree(nv_encoder); return ret; }
gpl-2.0
xobs/adafruit-rpi-kernel
sound/usb/pcm.c
670
43659
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/slab.h> #include <linux/bitrev.h> #include <linux/ratelimit.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <linux/usb/audio-v2.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "usbaudio.h" #include "card.h" #include "quirks.h" #include "debug.h" #include "endpoint.h" #include "helper.h" #include "pcm.h" #include "clock.h" #include "power.h" #define SUBSTREAM_FLAG_DATA_EP_STARTED 0 #define SUBSTREAM_FLAG_SYNC_EP_STARTED 1 /* return the estimated delay based on USB frame counters */ snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs, unsigned int rate) { int current_frame_number; int frame_diff; int est_delay; if (!subs->last_delay) return 0; /* short path */ current_frame_number = usb_get_current_frame_number(subs->dev); /* * HCD implementations use different widths, use lower 8 bits. 
* The delay will be managed up to 256ms, which is more than * enough */ frame_diff = (current_frame_number - subs->last_frame_number) & 0xff; /* Approximation based on number of samples per USB frame (ms), some truncation for 44.1 but the estimate is good enough */ est_delay = frame_diff * rate / 1000; if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) est_delay = subs->last_delay - est_delay; else est_delay = subs->last_delay + est_delay; if (est_delay < 0) est_delay = 0; return est_delay; } /* * return the current pcm pointer. just based on the hwptr_done value. */ static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_usb_substream *subs; unsigned int hwptr_done; subs = (struct snd_usb_substream *)substream->runtime->private_data; if (subs->stream->chip->shutdown) return SNDRV_PCM_POS_XRUN; spin_lock(&subs->lock); hwptr_done = subs->hwptr_done; substream->runtime->delay = snd_usb_pcm_delay(subs, substream->runtime->rate); spin_unlock(&subs->lock); return hwptr_done / (substream->runtime->frame_bits >> 3); } /* * find a matching audio format */ static struct audioformat *find_format(struct snd_usb_substream *subs) { struct audioformat *fp; struct audioformat *found = NULL; int cur_attr = 0, attr; list_for_each_entry(fp, &subs->fmt_list, list) { if (!(fp->formats & pcm_format_to_bits(subs->pcm_format))) continue; if (fp->channels != subs->channels) continue; if (subs->cur_rate < fp->rate_min || subs->cur_rate > fp->rate_max) continue; if (! (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)) { unsigned int i; for (i = 0; i < fp->nr_rates; i++) if (fp->rate_table[i] == subs->cur_rate) break; if (i >= fp->nr_rates) continue; } attr = fp->ep_attr & USB_ENDPOINT_SYNCTYPE; if (! found) { found = fp; cur_attr = attr; continue; } /* avoid async out and adaptive in if the other method * supports the same format. * this is a workaround for the case like * M-audio audiophile USB. 
*/ if (attr != cur_attr) { if ((attr == USB_ENDPOINT_SYNC_ASYNC && subs->direction == SNDRV_PCM_STREAM_PLAYBACK) || (attr == USB_ENDPOINT_SYNC_ADAPTIVE && subs->direction == SNDRV_PCM_STREAM_CAPTURE)) continue; if ((cur_attr == USB_ENDPOINT_SYNC_ASYNC && subs->direction == SNDRV_PCM_STREAM_PLAYBACK) || (cur_attr == USB_ENDPOINT_SYNC_ADAPTIVE && subs->direction == SNDRV_PCM_STREAM_CAPTURE)) { found = fp; cur_attr = attr; continue; } } /* find the format with the largest max. packet size */ if (fp->maxpacksize > found->maxpacksize) { found = fp; cur_attr = attr; } } return found; } static int init_pitch_v1(struct snd_usb_audio *chip, int iface, struct usb_host_interface *alts, struct audioformat *fmt) { struct usb_device *dev = chip->dev; unsigned int ep; unsigned char data[1]; int err; ep = get_endpoint(alts, 0)->bEndpointAddress; data[0] = 1; if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR, USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT, UAC_EP_CS_ATTR_PITCH_CONTROL << 8, ep, data, sizeof(data))) < 0) { snd_printk(KERN_ERR "%d:%d:%d: cannot set enable PITCH\n", dev->devnum, iface, ep); return err; } return 0; } static int init_pitch_v2(struct snd_usb_audio *chip, int iface, struct usb_host_interface *alts, struct audioformat *fmt) { struct usb_device *dev = chip->dev; unsigned char data[1]; int err; data[0] = 1; if ((err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR, USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT, UAC2_EP_CS_PITCH << 8, 0, data, sizeof(data))) < 0) { snd_printk(KERN_ERR "%d:%d:%d: cannot set enable PITCH (v2)\n", dev->devnum, iface, fmt->altsetting); return err; } return 0; } /* * initialize the pitch control and sample rate */ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, struct usb_host_interface *alts, struct audioformat *fmt) { struct usb_interface_descriptor *altsd = get_iface_desc(alts); /* if endpoint doesn't have pitch control, bail out */ if (!(fmt->attributes & 
UAC_EP_CS_ATTR_PITCH_CONTROL)) return 0; switch (altsd->bInterfaceProtocol) { case UAC_VERSION_1: default: return init_pitch_v1(chip, iface, alts, fmt); case UAC_VERSION_2: return init_pitch_v2(chip, iface, alts, fmt); } } static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep) { int err; if (!subs->data_endpoint) return -EINVAL; if (!test_and_set_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags)) { struct snd_usb_endpoint *ep = subs->data_endpoint; snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep); ep->data_subs = subs; err = snd_usb_endpoint_start(ep, can_sleep); if (err < 0) { clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags); return err; } } if (subs->sync_endpoint && !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) { struct snd_usb_endpoint *ep = subs->sync_endpoint; if (subs->data_endpoint->iface != subs->sync_endpoint->iface || subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) { err = usb_set_interface(subs->dev, subs->sync_endpoint->iface, subs->sync_endpoint->alt_idx); if (err < 0) { snd_printk(KERN_ERR "%d:%d:%d: cannot set interface (%d)\n", subs->dev->devnum, subs->sync_endpoint->iface, subs->sync_endpoint->alt_idx, err); return -EIO; } } snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep); ep->sync_slave = subs->data_endpoint; err = snd_usb_endpoint_start(ep, can_sleep); if (err < 0) { clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags); return err; } } return 0; } static void stop_endpoints(struct snd_usb_substream *subs, bool wait) { if (test_and_clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) snd_usb_endpoint_stop(subs->sync_endpoint); if (test_and_clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags)) snd_usb_endpoint_stop(subs->data_endpoint); if (wait) { snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint); snd_usb_endpoint_sync_pending_stop(subs->data_endpoint); } } static int deactivate_endpoints(struct snd_usb_substream *subs) { int reta, retb; reta = 
snd_usb_endpoint_deactivate(subs->sync_endpoint); retb = snd_usb_endpoint_deactivate(subs->data_endpoint); if (reta < 0) return reta; if (retb < 0) return retb; return 0; } /* * find a matching format and set up the interface */ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt) { struct usb_device *dev = subs->dev; struct usb_host_interface *alts; struct usb_interface_descriptor *altsd; struct usb_interface *iface; unsigned int ep, attr; int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK; int err, implicit_fb = 0; iface = usb_ifnum_to_if(dev, fmt->iface); if (WARN_ON(!iface)) return -EINVAL; alts = &iface->altsetting[fmt->altset_idx]; altsd = get_iface_desc(alts); if (WARN_ON(altsd->bAlternateSetting != fmt->altsetting)) return -EINVAL; if (fmt == subs->cur_audiofmt) return 0; /* close the old interface */ if (subs->interface >= 0 && subs->interface != fmt->iface) { err = usb_set_interface(subs->dev, subs->interface, 0); if (err < 0) { snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed (%d)\n", dev->devnum, fmt->iface, fmt->altsetting, err); return -EIO; } subs->interface = -1; subs->altset_idx = 0; } /* set interface */ if (subs->interface != fmt->iface || subs->altset_idx != fmt->altset_idx) { err = usb_set_interface(dev, fmt->iface, fmt->altsetting); if (err < 0) { snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed (%d)\n", dev->devnum, fmt->iface, fmt->altsetting, err); return -EIO; } snd_printdd(KERN_INFO "setting usb interface %d:%d\n", fmt->iface, fmt->altsetting); subs->interface = fmt->iface; subs->altset_idx = fmt->altset_idx; snd_usb_set_interface_quirk(dev); } subs->data_endpoint = snd_usb_add_endpoint(subs->stream->chip, alts, fmt->endpoint, subs->direction, SND_USB_ENDPOINT_TYPE_DATA); if (!subs->data_endpoint) return -EINVAL; /* we need a sync pipe in async OUT or adaptive IN mode */ /* check the number of EP, since some devices have broken * descriptors which fool us. 
if it has only one EP, * assume it as adaptive-out or sync-in. */ attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE; switch (subs->stream->chip->usb_id) { case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */ case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */ if (is_playback) { implicit_fb = 1; ep = 0x81; iface = usb_ifnum_to_if(dev, 3); if (!iface || iface->num_altsetting == 0) return -EINVAL; alts = &iface->altsetting[1]; goto add_sync_ep; } break; case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */ case USB_ID(0x0763, 0x2081): if (is_playback) { implicit_fb = 1; ep = 0x81; iface = usb_ifnum_to_if(dev, 2); if (!iface || iface->num_altsetting == 0) return -EINVAL; alts = &iface->altsetting[1]; goto add_sync_ep; } } if (((is_playback && attr == USB_ENDPOINT_SYNC_ASYNC) || (!is_playback && attr == USB_ENDPOINT_SYNC_ADAPTIVE)) && altsd->bNumEndpoints >= 2) { /* check sync-pipe endpoint */ /* ... and check descriptor size before accessing bSynchAddress because there is a version of the SB Audigy 2 NX firmware lacking the audio fields in the endpoint descriptors */ if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC || (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && get_endpoint(alts, 1)->bSynchAddress != 0 && !implicit_fb)) { snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n", dev->devnum, fmt->iface, fmt->altsetting, get_endpoint(alts, 1)->bmAttributes, get_endpoint(alts, 1)->bLength, get_endpoint(alts, 1)->bSynchAddress); return -EINVAL; } ep = get_endpoint(alts, 1)->bEndpointAddress; if (!implicit_fb && get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) || (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) { snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. 
is_playback %d, ep %02x, bSynchAddress %02x\n", dev->devnum, fmt->iface, fmt->altsetting, is_playback, ep, get_endpoint(alts, 0)->bSynchAddress); return -EINVAL; } implicit_fb = (get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_USAGE_MASK) == USB_ENDPOINT_USAGE_IMPLICIT_FB; add_sync_ep: subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip, alts, ep, !subs->direction, implicit_fb ? SND_USB_ENDPOINT_TYPE_DATA : SND_USB_ENDPOINT_TYPE_SYNC); if (!subs->sync_endpoint) return -EINVAL; subs->data_endpoint->sync_master = subs->sync_endpoint; } if ((err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt)) < 0) return err; subs->cur_audiofmt = fmt; snd_usb_set_format_quirk(subs, fmt); #if 0 printk(KERN_DEBUG "setting done: format = %d, rate = %d..%d, channels = %d\n", fmt->format, fmt->rate_min, fmt->rate_max, fmt->channels); printk(KERN_DEBUG " datapipe = 0x%0x, syncpipe = 0x%0x\n", subs->datapipe, subs->syncpipe); #endif return 0; } /* * Return the score of matching two audioformats. * Veto the audioformat if: * - It has no channels for some reason. * - Requested PCM format is not supported. * - Requested sample rate is not supported. */ static int match_endpoint_audioformats(struct audioformat *fp, struct audioformat *match, int rate, snd_pcm_format_t pcm_format) { int i; int score = 0; if (fp->channels < 1) { snd_printdd("%s: (fmt @%p) no channels\n", __func__, fp); return 0; } if (!(fp->formats & pcm_format_to_bits(pcm_format))) { snd_printdd("%s: (fmt @%p) no match for format %d\n", __func__, fp, pcm_format); return 0; } for (i = 0; i < fp->nr_rates; i++) { if (fp->rate_table[i] == rate) { score++; break; } } if (!score) { snd_printdd("%s: (fmt @%p) no match for rate %d\n", __func__, fp, rate); return 0; } if (fp->channels == match->channels) score++; snd_printdd("%s: (fmt @%p) score %d\n", __func__, fp, score); return score; } /* * Configure the sync ep using the rate and pcm format of the data ep. 
*/ static int configure_sync_endpoint(struct snd_usb_substream *subs) { int ret; struct audioformat *fp; struct audioformat *sync_fp = NULL; int cur_score = 0; int sync_period_bytes = subs->period_bytes; struct snd_usb_substream *sync_subs = &subs->stream->substream[subs->direction ^ 1]; if (subs->sync_endpoint->type != SND_USB_ENDPOINT_TYPE_DATA || !subs->stream) return snd_usb_endpoint_set_params(subs->sync_endpoint, subs->pcm_format, subs->channels, subs->period_bytes, subs->cur_rate, subs->cur_audiofmt, NULL); /* Try to find the best matching audioformat. */ list_for_each_entry(fp, &sync_subs->fmt_list, list) { int score = match_endpoint_audioformats(fp, subs->cur_audiofmt, subs->cur_rate, subs->pcm_format); if (score > cur_score) { sync_fp = fp; cur_score = score; } } if (unlikely(sync_fp == NULL)) { snd_printk(KERN_ERR "%s: no valid audioformat for sync ep %x found\n", __func__, sync_subs->ep_num); return -EINVAL; } /* * Recalculate the period bytes if channel number differ between * data and sync ep audioformat. */ if (sync_fp->channels != subs->channels) { sync_period_bytes = (subs->period_bytes / subs->channels) * sync_fp->channels; snd_printdd("%s: adjusted sync ep period bytes (%d -> %d)\n", __func__, subs->period_bytes, sync_period_bytes); } ret = snd_usb_endpoint_set_params(subs->sync_endpoint, subs->pcm_format, sync_fp->channels, sync_period_bytes, subs->cur_rate, sync_fp, NULL); return ret; } /* * configure endpoint params * * called during initial setup and upon resume */ static int configure_endpoint(struct snd_usb_substream *subs) { int ret; /* format changed */ stop_endpoints(subs, true); ret = snd_usb_endpoint_set_params(subs->data_endpoint, subs->pcm_format, subs->channels, subs->period_bytes, subs->cur_rate, subs->cur_audiofmt, subs->sync_endpoint); if (ret < 0) return ret; if (subs->sync_endpoint) ret = configure_sync_endpoint(subs); return ret; } /* * hw_params callback * * allocate a buffer and set the given audio format. 
* * so far we use a physically linear buffer although packetize transfer * doesn't need a continuous area. * if sg buffer is supported on the later version of alsa, we'll follow * that. */ static int snd_usb_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_usb_substream *subs = substream->runtime->private_data; struct audioformat *fmt; int ret; ret = snd_pcm_lib_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); if (ret < 0) return ret; subs->pcm_format = params_format(hw_params); subs->period_bytes = params_period_bytes(hw_params); subs->channels = params_channels(hw_params); subs->cur_rate = params_rate(hw_params); fmt = find_format(subs); if (!fmt) { snd_printd(KERN_DEBUG "cannot set format: format = %#x, rate = %d, channels = %d\n", subs->pcm_format, subs->cur_rate, subs->channels); return -EINVAL; } down_read(&subs->stream->chip->shutdown_rwsem); if (subs->stream->chip->shutdown) ret = -ENODEV; else ret = set_format(subs, fmt); up_read(&subs->stream->chip->shutdown_rwsem); if (ret < 0) return ret; subs->interface = fmt->iface; subs->altset_idx = fmt->altset_idx; subs->need_setup_ep = true; return 0; } /* * hw_free callback * * reset the audio format and release the buffer */ static int snd_usb_hw_free(struct snd_pcm_substream *substream) { struct snd_usb_substream *subs = substream->runtime->private_data; subs->cur_audiofmt = NULL; subs->cur_rate = 0; subs->period_bytes = 0; down_read(&subs->stream->chip->shutdown_rwsem); if (!subs->stream->chip->shutdown) { stop_endpoints(subs, true); deactivate_endpoints(subs); } up_read(&subs->stream->chip->shutdown_rwsem); return snd_pcm_lib_free_vmalloc_buffer(substream); } /* * prepare callback * * only a few subtle things... 
*/ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usb_substream *subs = runtime->private_data; struct usb_host_interface *alts; struct usb_interface *iface; int ret; if (! subs->cur_audiofmt) { snd_printk(KERN_ERR "usbaudio: no format is specified!\n"); return -ENXIO; } down_read(&subs->stream->chip->shutdown_rwsem); if (subs->stream->chip->shutdown) { ret = -ENODEV; goto unlock; } if (snd_BUG_ON(!subs->data_endpoint)) { ret = -EIO; goto unlock; } snd_usb_endpoint_sync_pending_stop(subs->sync_endpoint); snd_usb_endpoint_sync_pending_stop(subs->data_endpoint); ret = set_format(subs, subs->cur_audiofmt); if (ret < 0) goto unlock; iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface); alts = &iface->altsetting[subs->cur_audiofmt->altset_idx]; ret = snd_usb_init_sample_rate(subs->stream->chip, subs->cur_audiofmt->iface, alts, subs->cur_audiofmt, subs->cur_rate); if (ret < 0) goto unlock; if (subs->need_setup_ep) { ret = configure_endpoint(subs); if (ret < 0) goto unlock; subs->need_setup_ep = false; } /* some unit conversions in runtime */ subs->data_endpoint->maxframesize = bytes_to_frames(runtime, subs->data_endpoint->maxpacksize); subs->data_endpoint->curframesize = bytes_to_frames(runtime, subs->data_endpoint->curpacksize); /* reset the pointer */ subs->hwptr_done = 0; subs->transfer_done = 0; subs->last_delay = 0; subs->last_frame_number = 0; runtime->delay = 0; /* for playback, submit the URBs now; otherwise, the first hwptr_done * updates for all URBs would happen at the same time when starting */ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ret = start_endpoints(subs, true); unlock: up_read(&subs->stream->chip->shutdown_rwsem); return ret; } static struct snd_pcm_hardware snd_usb_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE, 
.buffer_bytes_max = 1024 * 1024, .period_bytes_min = 64, .period_bytes_max = 512 * 1024, .periods_min = 2, .periods_max = 1024, }; static int hw_check_valid_format(struct snd_usb_substream *subs, struct snd_pcm_hw_params *params, struct audioformat *fp) { struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *ct = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_mask *fmts = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); struct snd_interval *pt = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME); struct snd_mask check_fmts; unsigned int ptime; /* check the format */ snd_mask_none(&check_fmts); check_fmts.bits[0] = (u32)fp->formats; check_fmts.bits[1] = (u32)(fp->formats >> 32); snd_mask_intersect(&check_fmts, fmts); if (snd_mask_empty(&check_fmts)) { hwc_debug(" > check: no supported format %d\n", fp->format); return 0; } /* check the channels */ if (fp->channels < ct->min || fp->channels > ct->max) { hwc_debug(" > check: no valid channels %d (%d/%d)\n", fp->channels, ct->min, ct->max); return 0; } /* check the rate is within the range */ if (fp->rate_min > it->max || (fp->rate_min == it->max && it->openmax)) { hwc_debug(" > check: rate_min %d > max %d\n", fp->rate_min, it->max); return 0; } if (fp->rate_max < it->min || (fp->rate_max == it->min && it->openmin)) { hwc_debug(" > check: rate_max %d < min %d\n", fp->rate_max, it->min); return 0; } /* check whether the period time is >= the data packet interval */ if (subs->speed != USB_SPEED_FULL) { ptime = 125 * (1 << fp->datainterval); if (ptime > pt->max || (ptime == pt->max && pt->openmax)) { hwc_debug(" > check: ptime %u > max %u\n", ptime, pt->max); return 0; } } return 1; } static int hw_rule_rate(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_usb_substream *subs = rule->private; struct audioformat *fp; struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); unsigned int rmin, rmax; int 
changed; hwc_debug("hw_rule_rate: (%d,%d)\n", it->min, it->max); changed = 0; rmin = rmax = 0; list_for_each_entry(fp, &subs->fmt_list, list) { if (!hw_check_valid_format(subs, params, fp)) continue; if (changed++) { if (rmin > fp->rate_min) rmin = fp->rate_min; if (rmax < fp->rate_max) rmax = fp->rate_max; } else { rmin = fp->rate_min; rmax = fp->rate_max; } } if (!changed) { hwc_debug(" --> get empty\n"); it->empty = 1; return -EINVAL; } changed = 0; if (it->min < rmin) { it->min = rmin; it->openmin = 0; changed = 1; } if (it->max > rmax) { it->max = rmax; it->openmax = 0; changed = 1; } if (snd_interval_checkempty(it)) { it->empty = 1; return -EINVAL; } hwc_debug(" --> (%d, %d) (changed = %d)\n", it->min, it->max, changed); return changed; } static int hw_rule_channels(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_usb_substream *subs = rule->private; struct audioformat *fp; struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); unsigned int rmin, rmax; int changed; hwc_debug("hw_rule_channels: (%d,%d)\n", it->min, it->max); changed = 0; rmin = rmax = 0; list_for_each_entry(fp, &subs->fmt_list, list) { if (!hw_check_valid_format(subs, params, fp)) continue; if (changed++) { if (rmin > fp->channels) rmin = fp->channels; if (rmax < fp->channels) rmax = fp->channels; } else { rmin = fp->channels; rmax = fp->channels; } } if (!changed) { hwc_debug(" --> get empty\n"); it->empty = 1; return -EINVAL; } changed = 0; if (it->min < rmin) { it->min = rmin; it->openmin = 0; changed = 1; } if (it->max > rmax) { it->max = rmax; it->openmax = 0; changed = 1; } if (snd_interval_checkempty(it)) { it->empty = 1; return -EINVAL; } hwc_debug(" --> (%d, %d) (changed = %d)\n", it->min, it->max, changed); return changed; } static int hw_rule_format(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_usb_substream *subs = rule->private; struct audioformat *fp; struct snd_mask *fmt = hw_param_mask(params, 
SNDRV_PCM_HW_PARAM_FORMAT); u64 fbits; u32 oldbits[2]; int changed; hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]); fbits = 0; list_for_each_entry(fp, &subs->fmt_list, list) { if (!hw_check_valid_format(subs, params, fp)) continue; fbits |= fp->formats; } oldbits[0] = fmt->bits[0]; oldbits[1] = fmt->bits[1]; fmt->bits[0] &= (u32)fbits; fmt->bits[1] &= (u32)(fbits >> 32); if (!fmt->bits[0] && !fmt->bits[1]) { hwc_debug(" --> get empty\n"); return -EINVAL; } changed = (oldbits[0] != fmt->bits[0] || oldbits[1] != fmt->bits[1]); hwc_debug(" --> %x:%x (changed = %d)\n", fmt->bits[0], fmt->bits[1], changed); return changed; } static int hw_rule_period_time(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule) { struct snd_usb_substream *subs = rule->private; struct audioformat *fp; struct snd_interval *it; unsigned char min_datainterval; unsigned int pmin; int changed; it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_TIME); hwc_debug("hw_rule_period_time: (%u,%u)\n", it->min, it->max); min_datainterval = 0xff; list_for_each_entry(fp, &subs->fmt_list, list) { if (!hw_check_valid_format(subs, params, fp)) continue; min_datainterval = min(min_datainterval, fp->datainterval); } if (min_datainterval == 0xff) { hwc_debug(" --> get empty\n"); it->empty = 1; return -EINVAL; } pmin = 125 * (1 << min_datainterval); changed = 0; if (it->min < pmin) { it->min = pmin; it->openmin = 0; changed = 1; } if (snd_interval_checkempty(it)) { it->empty = 1; return -EINVAL; } hwc_debug(" --> (%u,%u) (changed = %d)\n", it->min, it->max, changed); return changed; } /* * If the device supports unusual bit rates, does the request meet these? 
*/ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs) { struct audioformat *fp; int *rate_list; int count = 0, needs_knot = 0; int err; kfree(subs->rate_list.list); subs->rate_list.list = NULL; list_for_each_entry(fp, &subs->fmt_list, list) { if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) return 0; count += fp->nr_rates; if (fp->rates & SNDRV_PCM_RATE_KNOT) needs_knot = 1; } if (!needs_knot) return 0; subs->rate_list.list = rate_list = kmalloc(sizeof(int) * count, GFP_KERNEL); if (!subs->rate_list.list) return -ENOMEM; subs->rate_list.count = count; subs->rate_list.mask = 0; count = 0; list_for_each_entry(fp, &subs->fmt_list, list) { int i; for (i = 0; i < fp->nr_rates; i++) rate_list[count++] = fp->rate_table[i]; } err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &subs->rate_list); if (err < 0) return err; return 0; } /* * set up the runtime hardware information. */ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs) { struct audioformat *fp; unsigned int pt, ptmin; int param_period_time_if_needed; int err; runtime->hw.formats = subs->formats; runtime->hw.rate_min = 0x7fffffff; runtime->hw.rate_max = 0; runtime->hw.channels_min = 256; runtime->hw.channels_max = 0; runtime->hw.rates = 0; ptmin = UINT_MAX; /* check min/max rates and channels */ list_for_each_entry(fp, &subs->fmt_list, list) { runtime->hw.rates |= fp->rates; if (runtime->hw.rate_min > fp->rate_min) runtime->hw.rate_min = fp->rate_min; if (runtime->hw.rate_max < fp->rate_max) runtime->hw.rate_max = fp->rate_max; if (runtime->hw.channels_min > fp->channels) runtime->hw.channels_min = fp->channels; if (runtime->hw.channels_max < fp->channels) runtime->hw.channels_max = fp->channels; if (fp->fmt_type == UAC_FORMAT_TYPE_II && fp->frame_size > 0) { /* FIXME: there might be more than one audio formats... 
*/ runtime->hw.period_bytes_min = runtime->hw.period_bytes_max = fp->frame_size; } pt = 125 * (1 << fp->datainterval); ptmin = min(ptmin, pt); } err = snd_usb_autoresume(subs->stream->chip); if (err < 0) return err; param_period_time_if_needed = SNDRV_PCM_HW_PARAM_PERIOD_TIME; if (subs->speed == USB_SPEED_FULL) /* full speed devices have fixed data packet interval */ ptmin = 1000; if (ptmin == 1000) /* if period time doesn't go below 1 ms, no rules needed */ param_period_time_if_needed = -1; snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, ptmin, UINT_MAX); if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, hw_rule_rate, subs, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_CHANNELS, param_period_time_if_needed, -1)) < 0) goto rep_err; if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS, hw_rule_channels, subs, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_RATE, param_period_time_if_needed, -1)) < 0) goto rep_err; if ((err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT, hw_rule_format, subs, SNDRV_PCM_HW_PARAM_RATE, SNDRV_PCM_HW_PARAM_CHANNELS, param_period_time_if_needed, -1)) < 0) goto rep_err; if (param_period_time_if_needed >= 0) { err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME, hw_rule_period_time, subs, SNDRV_PCM_HW_PARAM_FORMAT, SNDRV_PCM_HW_PARAM_CHANNELS, SNDRV_PCM_HW_PARAM_RATE, -1); if (err < 0) goto rep_err; } if ((err = snd_usb_pcm_check_knot(runtime, subs)) < 0) goto rep_err; return 0; rep_err: snd_usb_autosuspend(subs->stream->chip); return err; } static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction) { struct snd_usb_stream *as = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usb_substream *subs = &as->substream[direction]; subs->interface = -1; subs->altset_idx = 0; runtime->hw = snd_usb_hardware; runtime->private_data = subs; subs->pcm_substream = substream; /* runtime PM is also 
done there */ /* initialize DSD/DOP context */ subs->dsd_dop.byte_idx = 0; subs->dsd_dop.channel = 0; subs->dsd_dop.marker = 1; return setup_hw_info(runtime, subs); } static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction) { struct snd_usb_stream *as = snd_pcm_substream_chip(substream); struct snd_usb_substream *subs = &as->substream[direction]; stop_endpoints(subs, true); if (!as->chip->shutdown && subs->interface >= 0) { usb_set_interface(subs->dev, subs->interface, 0); subs->interface = -1; } subs->pcm_substream = NULL; snd_usb_autosuspend(subs->stream->chip); return 0; } /* Since a URB can handle only a single linear buffer, we must use double * buffering when the data to be transferred overflows the buffer boundary. * To avoid inconsistencies when updating hwptr_done, we use double buffering * for all URBs. */ static void retire_capture_urb(struct snd_usb_substream *subs, struct urb *urb) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; unsigned int stride, frames, bytes, oldptr; int i, period_elapsed = 0; unsigned long flags; unsigned char *cp; int current_frame_number; /* read frame number here, update pointer in critical section */ current_frame_number = usb_get_current_frame_number(subs->dev); stride = runtime->frame_bits >> 3; for (i = 0; i < urb->number_of_packets; i++) { cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset + subs->pkt_offset_adj; if (urb->iso_frame_desc[i].status && printk_ratelimit()) { snd_printdd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status); // continue; } bytes = urb->iso_frame_desc[i].actual_length; frames = bytes / stride; if (!subs->txfr_quirk) bytes = frames * stride; if (bytes % (runtime->sample_bits >> 3) != 0) { int oldbytes = bytes; bytes = frames * stride; snd_printdd(KERN_ERR "Corrected urb data len. 
%d->%d\n", oldbytes, bytes); } /* update the current pointer */ spin_lock_irqsave(&subs->lock, flags); oldptr = subs->hwptr_done; subs->hwptr_done += bytes; if (subs->hwptr_done >= runtime->buffer_size * stride) subs->hwptr_done -= runtime->buffer_size * stride; frames = (bytes + (oldptr % stride)) / stride; subs->transfer_done += frames; if (subs->transfer_done >= runtime->period_size) { subs->transfer_done -= runtime->period_size; period_elapsed = 1; } /* capture delay is by construction limited to one URB, * reset delays here */ runtime->delay = subs->last_delay = 0; /* realign last_frame_number */ subs->last_frame_number = current_frame_number; subs->last_frame_number &= 0xFF; /* keep 8 LSBs */ spin_unlock_irqrestore(&subs->lock, flags); /* copy a data chunk */ if (oldptr + bytes > runtime->buffer_size * stride) { unsigned int bytes1 = runtime->buffer_size * stride - oldptr; memcpy(runtime->dma_area + oldptr, cp, bytes1); memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1); } else { memcpy(runtime->dma_area + oldptr, cp, bytes); } } if (period_elapsed) snd_pcm_period_elapsed(subs->pcm_substream); } static inline void fill_playback_urb_dsd_dop(struct snd_usb_substream *subs, struct urb *urb, unsigned int bytes) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; unsigned int stride = runtime->frame_bits >> 3; unsigned int dst_idx = 0; unsigned int src_idx = subs->hwptr_done; unsigned int wrap = runtime->buffer_size * stride; u8 *dst = urb->transfer_buffer; u8 *src = runtime->dma_area; u8 marker[] = { 0x05, 0xfa }; /* * The DSP DOP format defines a way to transport DSD samples over * normal PCM data endpoints. It requires stuffing of marker bytes * (0x05 and 0xfa, alternating per sample frame), and then expects * 2 additional bytes of actual payload. The whole frame is stored * LSB. * * Hence, for a stereo transport, the buffer layout looks like this, * where L refers to left channel samples and R to right. 
* * L1 L2 0x05 R1 R2 0x05 L3 L4 0xfa R3 R4 0xfa * L5 L6 0x05 R5 R6 0x05 L7 L8 0xfa R7 R8 0xfa * ..... * */ while (bytes--) { if (++subs->dsd_dop.byte_idx == 3) { /* frame boundary? */ dst[dst_idx++] = marker[subs->dsd_dop.marker]; src_idx += 2; subs->dsd_dop.byte_idx = 0; if (++subs->dsd_dop.channel % runtime->channels == 0) { /* alternate the marker */ subs->dsd_dop.marker++; subs->dsd_dop.marker %= ARRAY_SIZE(marker); subs->dsd_dop.channel = 0; } } else { /* stuff the DSD payload */ int idx = (src_idx + subs->dsd_dop.byte_idx - 1) % wrap; if (subs->cur_audiofmt->dsd_bitrev) dst[dst_idx++] = bitrev8(src[idx]); else dst[dst_idx++] = src[idx]; subs->hwptr_done++; } } } static void prepare_playback_urb(struct snd_usb_substream *subs, struct urb *urb) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; struct snd_usb_endpoint *ep = subs->data_endpoint; struct snd_urb_ctx *ctx = urb->context; unsigned int counts, frames, bytes; int i, stride, period_elapsed = 0; unsigned long flags; stride = runtime->frame_bits >> 3; frames = 0; urb->number_of_packets = 0; spin_lock_irqsave(&subs->lock, flags); for (i = 0; i < ctx->packets; i++) { if (ctx->packet_size[i]) counts = ctx->packet_size[i]; else counts = snd_usb_endpoint_next_packet_size(ep); /* set up descriptor */ urb->iso_frame_desc[i].offset = frames * ep->stride; urb->iso_frame_desc[i].length = counts * ep->stride; frames += counts; urb->number_of_packets++; subs->transfer_done += counts; if (subs->transfer_done >= runtime->period_size) { subs->transfer_done -= runtime->period_size; period_elapsed = 1; if (subs->fmt_type == UAC_FORMAT_TYPE_II) { if (subs->transfer_done > 0) { /* FIXME: fill-max mode is not * supported yet */ frames -= subs->transfer_done; counts -= subs->transfer_done; urb->iso_frame_desc[i].length = counts * ep->stride; subs->transfer_done = 0; } i++; if (i < ctx->packets) { /* add a transfer delimiter */ urb->iso_frame_desc[i].offset = frames * ep->stride; urb->iso_frame_desc[i].length = 
0; urb->number_of_packets++; } break; } } if (period_elapsed && !snd_usb_endpoint_implicit_feedback_sink(subs->data_endpoint)) /* finish at the period boundary */ break; } bytes = frames * ep->stride; if (unlikely(subs->pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && subs->cur_audiofmt->dsd_dop)) { fill_playback_urb_dsd_dop(subs, urb, bytes); } else if (unlikely(subs->pcm_format == SNDRV_PCM_FORMAT_DSD_U8 && subs->cur_audiofmt->dsd_bitrev)) { /* bit-reverse the bytes */ u8 *buf = urb->transfer_buffer; for (i = 0; i < bytes; i++) { int idx = (subs->hwptr_done + i) % (runtime->buffer_size * stride); buf[i] = bitrev8(runtime->dma_area[idx]); } subs->hwptr_done += bytes; } else { /* usual PCM */ if (subs->hwptr_done + bytes > runtime->buffer_size * stride) { /* err, the transferred area goes over buffer boundary. */ unsigned int bytes1 = runtime->buffer_size * stride - subs->hwptr_done; memcpy(urb->transfer_buffer, runtime->dma_area + subs->hwptr_done, bytes1); memcpy(urb->transfer_buffer + bytes1, runtime->dma_area, bytes - bytes1); } else { memcpy(urb->transfer_buffer, runtime->dma_area + subs->hwptr_done, bytes); } subs->hwptr_done += bytes; } if (subs->hwptr_done >= runtime->buffer_size * stride) subs->hwptr_done -= runtime->buffer_size * stride; /* update delay with exact number of samples queued */ runtime->delay = subs->last_delay; runtime->delay += frames; subs->last_delay = runtime->delay; /* realign last_frame_number */ subs->last_frame_number = usb_get_current_frame_number(subs->dev); subs->last_frame_number &= 0xFF; /* keep 8 LSBs */ spin_unlock_irqrestore(&subs->lock, flags); urb->transfer_buffer_length = bytes; if (period_elapsed) snd_pcm_period_elapsed(subs->pcm_substream); } /* * process after playback data complete * - decrease the delay count again */ static void retire_playback_urb(struct snd_usb_substream *subs, struct urb *urb) { unsigned long flags; struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; struct snd_usb_endpoint *ep = 
subs->data_endpoint; int processed = urb->transfer_buffer_length / ep->stride; int est_delay; /* ignore the delay accounting when procssed=0 is given, i.e. * silent payloads are procssed before handling the actual data */ if (!processed) return; spin_lock_irqsave(&subs->lock, flags); if (!subs->last_delay) goto out; /* short path */ est_delay = snd_usb_pcm_delay(subs, runtime->rate); /* update delay with exact number of samples played */ if (processed > subs->last_delay) subs->last_delay = 0; else subs->last_delay -= processed; runtime->delay = subs->last_delay; /* * Report when delay estimate is off by more than 2ms. * The error should be lower than 2ms since the estimate relies * on two reads of a counter updated every ms. */ if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2) snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n", est_delay, subs->last_delay); if (!subs->running) { /* update last_frame_number for delay counting here since * prepare_playback_urb won't be called during pause */ subs->last_frame_number = usb_get_current_frame_number(subs->dev) & 0xff; } out: spin_unlock_irqrestore(&subs->lock, flags); } static int snd_usb_playback_open(struct snd_pcm_substream *substream) { return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_PLAYBACK); } static int snd_usb_playback_close(struct snd_pcm_substream *substream) { return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_PLAYBACK); } static int snd_usb_capture_open(struct snd_pcm_substream *substream) { return snd_usb_pcm_open(substream, SNDRV_PCM_STREAM_CAPTURE); } static int snd_usb_capture_close(struct snd_pcm_substream *substream) { return snd_usb_pcm_close(substream, SNDRV_PCM_STREAM_CAPTURE); } static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_usb_substream *subs = substream->runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: subs->data_endpoint->prepare_data_urb = 
prepare_playback_urb; subs->data_endpoint->retire_data_urb = retire_playback_urb; subs->running = 1; return 0; case SNDRV_PCM_TRIGGER_STOP: stop_endpoints(subs, false); subs->running = 0; return 0; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: subs->data_endpoint->prepare_data_urb = NULL; /* keep retire_data_urb for delay calculation */ subs->data_endpoint->retire_data_urb = retire_playback_urb; subs->running = 0; return 0; } return -EINVAL; } static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd) { int err; struct snd_usb_substream *subs = substream->runtime->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: err = start_endpoints(subs, false); if (err < 0) return err; subs->data_endpoint->retire_data_urb = retire_capture_urb; subs->running = 1; return 0; case SNDRV_PCM_TRIGGER_STOP: stop_endpoints(subs, false); subs->running = 0; return 0; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: subs->data_endpoint->retire_data_urb = NULL; subs->running = 0; return 0; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: subs->data_endpoint->retire_data_urb = retire_capture_urb; subs->running = 1; return 0; } return -EINVAL; } static struct snd_pcm_ops snd_usb_playback_ops = { .open = snd_usb_playback_open, .close = snd_usb_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_usb_hw_params, .hw_free = snd_usb_hw_free, .prepare = snd_usb_pcm_prepare, .trigger = snd_usb_substream_playback_trigger, .pointer = snd_usb_pcm_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; static struct snd_pcm_ops snd_usb_capture_ops = { .open = snd_usb_capture_open, .close = snd_usb_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_usb_hw_params, .hw_free = snd_usb_hw_free, .prepare = snd_usb_pcm_prepare, .trigger = snd_usb_substream_capture_trigger, .pointer = snd_usb_pcm_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; void snd_usb_set_pcm_ops(struct snd_pcm *pcm, int stream) { snd_pcm_set_ops(pcm, 
stream, stream == SNDRV_PCM_STREAM_PLAYBACK ? &snd_usb_playback_ops : &snd_usb_capture_ops); }
gpl-2.0
NuxiNL/linux
sound/pci/hda/patch_cmedia.c
670
3518
/*
 * Universal Interface for Intel High Definition Audio Codec
 *
 * HD audio interface patch for C-Media CMI9880
 *
 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
 *
 *
 * This driver is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
#include "hda_generic.h"

/* per-codec private data; only wraps the generic parser state */
struct cmi_spec {
	struct hda_gen_spec gen;
};

/*
 * stuff for auto-parser
 */
static const struct hda_codec_ops cmi_auto_patch_ops = {
	.build_controls = snd_hda_gen_build_controls,
	.build_pcms = snd_hda_gen_build_pcms,
	.init = snd_hda_gen_init,
	.free = snd_hda_gen_free,
	.unsol_event = snd_hda_jack_unsol_event,
};

/*
 * Common setup shared by the CMI9880 and CMI8888 patch entries:
 * allocate the spec, attach the generic codec ops and initialize the
 * generic parser state.  Returns the new spec, or NULL on allocation
 * failure (nothing to clean up in that case).
 */
static struct cmi_spec *cmi_alloc_spec(struct hda_codec *codec)
{
	struct cmi_spec *spec = kzalloc(sizeof(*spec), GFP_KERNEL);

	if (!spec)
		return NULL;
	codec->spec = spec;
	codec->patch_ops = cmi_auto_patch_ops;
	snd_hda_gen_spec_init(&spec->gen);
	return spec;
}

/*
 * Parse the pin default configs and run the generic auto-parser.
 * Returns 0 on success or a negative error code; on error the caller
 * is responsible for snd_hda_gen_free().
 */
static int cmi_parse_auto_config(struct hda_codec *codec)
{
	struct cmi_spec *spec = codec->spec;
	struct auto_pin_cfg *cfg = &spec->gen.autocfg;
	int err;

	err = snd_hda_parse_pin_defcfg(codec, cfg, NULL, 0);
	if (err < 0)
		return err;
	return snd_hda_gen_parse_auto_config(codec, cfg);
}

/* patch entry for CMI9880: fully generic auto-parsed setup */
static int patch_cmi9880(struct hda_codec *codec)
{
	struct cmi_spec *spec = cmi_alloc_spec(codec);
	int err;

	if (!spec)
		return -ENOMEM;

	err = cmi_parse_auto_config(codec);
	if (err < 0)
		goto error;
	return 0;

 error:
	snd_hda_gen_free(codec);
	return err;
}

/* patch entry for CMI8888: like CMI9880 plus manual headphone boost amp */
static int patch_cmi8888(struct hda_codec *codec)
{
	struct cmi_spec *spec = cmi_alloc_spec(codec);
	int err;

	if (!spec)
		return -ENOMEM;

	/* mask NID 0x10 from the playback volume selection;
	 * it's a headphone boost volume handled manually below
	 */
	spec->gen.out_vol_mask = (1ULL << 0x10);

	err = cmi_parse_auto_config(codec);
	if (err < 0)
		goto error;

	/* expose NID 0x10 as an explicit volume control when its
	 * default config marks the pin as a headphone jack
	 */
	if (get_defcfg_device(snd_hda_codec_get_pincfg(codec, 0x10)) ==
	    AC_JACK_HP_OUT) {
		static const struct snd_kcontrol_new amp_kctl =
			HDA_CODEC_VOLUME("Headphone Amp Playback Volume",
					 0x10, 0, HDA_OUTPUT);
		if (!snd_hda_gen_add_kctl(&spec->gen, NULL, &amp_kctl)) {
			err = -ENOMEM;
			goto error;
		}
	}
	return 0;

 error:
	snd_hda_gen_free(codec);
	return err;
}

/*
 * patch entries
 */
static const struct hda_device_id snd_hda_id_cmedia[] = {
	HDA_CODEC_ENTRY(0x13f68888, "CMI8888", patch_cmi8888),
	HDA_CODEC_ENTRY(0x13f69880, "CMI9880", patch_cmi9880),
	HDA_CODEC_ENTRY(0x434d4980, "CMI9880", patch_cmi9880),
	{} /* terminator */
};
MODULE_DEVICE_TABLE(hdaudio, snd_hda_id_cmedia);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("C-Media HD-audio codec");

static struct hda_codec_driver cmedia_driver = {
	.id = snd_hda_id_cmedia,
};

module_hda_codec_driver(cmedia_driver);
gpl-2.0
vadonka/lge-kernel-kang
arch/arm/mach-lh7a40x/irq-lh7a400.c
1694
1997
/* arch/arm/mach-lh7a40x/irq-lh7a400.c
 *
 *  Copyright (C) 2004 Coastal Environmental Systems
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <mach/irqs.h>

#include "common.h"


  /* CPU IRQ handling */

/* Disable delivery of the given interrupt source. */
static void lh7a400_mask_irq (u32 irq)
{
	INTC_INTENC = (1 << irq);
}

/* Re-enable delivery of the given interrupt source. */
static void lh7a400_unmask_irq (u32 irq)
{
	INTC_INTENS = (1 << irq);
}

/* End-of-interrupt for a GPIO-F source, then mask it. */
static void lh7a400_ack_gpio_irq (u32 irq)
{
	GPIO_GPIOFEOI = (1 << IRQ_TO_GPIO (irq));
	INTC_INTENC = (1 << irq);
}

static struct irq_chip lh7a400_internal_chip = {
	.name	= "MPU",
	.ack	= lh7a400_mask_irq, /* Level triggering -> mask is ack */
	.mask	= lh7a400_mask_irq,
	.unmask	= lh7a400_unmask_irq,
};

static struct irq_chip lh7a400_gpio_chip = {
	.name	= "GPIO",
	.ack	= lh7a400_ack_gpio_irq,
	.mask	= lh7a400_mask_irq,
	.unmask	= lh7a400_unmask_irq,
};

/* Return non-zero when the IRQ is one of the GPIO-F interrupt sources. */
static int lh7a400_irq_is_gpio (u32 irq)
{
	switch (irq) {
	case IRQ_GPIO0INTR:
	case IRQ_GPIO1INTR:
	case IRQ_GPIO2INTR:
	case IRQ_GPIO3INTR:
	case IRQ_GPIO4INTR:
	case IRQ_GPIO5INTR:
	case IRQ_GPIO6INTR:
	case IRQ_GPIO7INTR:
		return 1;
	default:
		return 0;
	}
}

  /* IRQ initialization */

void __init lh7a400_init_irq (void)
{
	int irq;

	INTC_INTENC = 0xffffffff;	/* Disable all interrupts */
	GPIO_GPIOFINTEN = 0x00;		/* Disable all GPIOF interrupts */
	barrier ();

	for (irq = 0; irq < NR_IRQS; ++irq) {
		/* GPIO-F sources need the EOI-acking chip; everything
		   else uses the plain mask/unmask chip.  All of them
		   are level-triggered by default. */
		set_irq_chip (irq, lh7a400_irq_is_gpio (irq)
			      ? &lh7a400_gpio_chip
			      : &lh7a400_internal_chip);
		set_irq_handler (irq, handle_level_irq); /* OK default */
		set_irq_flags (irq, IRQF_VALID);
	}

	lh7a40x_init_board_irq ();

/* *** FIXME: the LH7a400 does use FIQ interrupts in some cases.  For
   the time being, these are not initialized. */

/*	init_FIQ(); */
}
gpl-2.0
deadman96385/android_kernel_asus_Z00A
drivers/edac/i7300_edac.c
1694
36190
/* * Intel 7300 class Memory Controllers kernel module (Clarksboro) * * This file may be distributed under the terms of the * GNU General Public License version 2 only. * * Copyright (c) 2010 by: * Mauro Carvalho Chehab <mchehab@redhat.com> * * Red Hat Inc. http://www.redhat.com * * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet * http://www.intel.com/Assets/PDF/datasheet/318082.pdf * * TODO: The chipset allow checking for PCI Express errors also. Currently, * the driver covers only memory error errors * * This driver uses "csrows" EDAC attribute to represent DIMM slot# */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> #include <linux/edac.h> #include <linux/mmzone.h> #include "edac_core.h" /* * Alter this version for the I7300 module when modifications are made */ #define I7300_REVISION " Ver: 1.0.0" #define EDAC_MOD_STR "i7300_edac" #define i7300_printk(level, fmt, arg...) \ edac_printk(level, "i7300", fmt, ##arg) #define i7300_mc_printk(mci, level, fmt, arg...) 
\ edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg) /*********************************************** * i7300 Limit constants Structs and static vars ***********************************************/ /* * Memory topology is organized as: * Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0) * Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0) * Each channel can have to 8 DIMM sets (called as SLOTS) * Slots should generally be filled in pairs * Except on Single Channel mode of operation * just slot 0/channel0 filled on this mode * On normal operation mode, the two channels on a branch should be * filled together for the same SLOT# * When in mirrored mode, Branch 1 replicate memory at Branch 0, so, the four * channels on both branches should be filled */ /* Limits for i7300 */ #define MAX_SLOTS 8 #define MAX_BRANCHES 2 #define MAX_CH_PER_BRANCH 2 #define MAX_CHANNELS (MAX_CH_PER_BRANCH * MAX_BRANCHES) #define MAX_MIR 3 #define to_channel(ch, branch) ((((branch)) << 1) | (ch)) #define to_csrow(slot, ch, branch) \ (to_channel(ch, branch) | ((slot) << 2)) /* Device name and register DID (Device ID) */ struct i7300_dev_info { const char *ctl_name; /* name for this device */ u16 fsb_mapping_errors; /* DID for the branchmap,control */ }; /* Table of devices attributes supported by this driver */ static const struct i7300_dev_info i7300_devs[] = { { .ctl_name = "I7300", .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, }, }; struct i7300_dimm_info { int megabytes; /* size, 0 means not present */ }; /* driver private data structure */ struct i7300_pvt { struct pci_dev *pci_dev_16_0_fsb_ctlr; /* 16.0 */ struct pci_dev *pci_dev_16_1_fsb_addr_map; /* 16.1 */ struct pci_dev *pci_dev_16_2_fsb_err_regs; /* 16.2 */ struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES]; /* 21.0 and 22.0 */ u16 tolm; /* top of low memory */ u64 ambase; /* AMB BAR */ u32 mc_settings; /* Report several settings */ u32 mc_settings_a; u16 mir[MAX_MIR]; /* Memory Interleave 
Reg*/ u16 mtr[MAX_SLOTS][MAX_BRANCHES]; /* Memory Technlogy Reg */ u16 ambpresent[MAX_CHANNELS]; /* AMB present regs */ /* DIMM information matrix, allocating architecture maximums */ struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS]; /* Temporary buffer for use when preparing error messages */ char *tmp_prt_buffer; }; /* FIXME: Why do we need to have this static? */ static struct edac_pci_ctl_info *i7300_pci; /*************************************************** * i7300 Register definitions for memory enumeration ***************************************************/ /* * Device 16, * Function 0: System Address (not documented) * Function 1: Memory Branch Map, Control, Errors Register */ /* OFFSETS for Function 0 */ #define AMBASE 0x48 /* AMB Mem Mapped Reg Region Base */ #define MAXCH 0x56 /* Max Channel Number */ #define MAXDIMMPERCH 0x57 /* Max DIMM PER Channel Number */ /* OFFSETS for Function 1 */ #define MC_SETTINGS 0x40 #define IS_MIRRORED(mc) ((mc) & (1 << 16)) #define IS_ECC_ENABLED(mc) ((mc) & (1 << 5)) #define IS_RETRY_ENABLED(mc) ((mc) & (1 << 31)) #define IS_SCRBALGO_ENHANCED(mc) ((mc) & (1 << 8)) #define MC_SETTINGS_A 0x58 #define IS_SINGLE_MODE(mca) ((mca) & (1 << 14)) #define TOLM 0x6C #define MIR0 0x80 #define MIR1 0x84 #define MIR2 0x88 /* * Note: Other Intel EDAC drivers use AMBPRESENT to identify if the available * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it * seems that we cannot use this information directly for the same usage. * Each memory slot may have up to 2 AMB interfaces, one for income and another * for outcome interface to the next slot. * For now, the driver just stores the AMB present registers, but rely only at * the MTR info to detect memory. * Datasheet is also not clear about how to map each AMBPRESENT registers to * one of the 4 available channels. 
*/ #define AMBPRESENT_0 0x64 #define AMBPRESENT_1 0x66 static const u16 mtr_regs[MAX_SLOTS] = { 0x80, 0x84, 0x88, 0x8c, 0x82, 0x86, 0x8a, 0x8e }; /* * Defines to extract the vaious fields from the * MTRx - Memory Technology Registers */ #define MTR_DIMMS_PRESENT(mtr) ((mtr) & (1 << 8)) #define MTR_DIMMS_ETHROTTLE(mtr) ((mtr) & (1 << 7)) #define MTR_DRAM_WIDTH(mtr) (((mtr) & (1 << 6)) ? 8 : 4) #define MTR_DRAM_BANKS(mtr) (((mtr) & (1 << 5)) ? 8 : 4) #define MTR_DIMM_RANKS(mtr) (((mtr) & (1 << 4)) ? 1 : 0) #define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3) #define MTR_DRAM_BANKS_ADDR_BITS 2 #define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13) #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) /************************************************ * i7300 Register definitions for error detection ************************************************/ /* * Device 16.1: FBD Error Registers */ #define FERR_FAT_FBD 0x98 static const char *ferr_fat_fbd_name[] = { [22] = "Non-Redundant Fast Reset Timeout", [2] = ">Tmid Thermal event with intelligent throttling disabled", [1] = "Memory or FBD configuration CRC read error", [0] = "Memory Write error on non-redundant retry or " "FBD configuration Write error on retry", }; #define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3) #define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22)) #define FERR_NF_FBD 0xa0 static const char *ferr_nf_fbd_name[] = { [24] = "DIMM-Spare Copy Completed", [23] = "DIMM-Spare Copy Initiated", [22] = "Redundant Fast Reset Timeout", [21] = "Memory Write error on redundant retry", [18] = "SPD protocol Error", [17] = "FBD Northbound parity error on FBD Sync Status", [16] = "Correctable Patrol Data ECC", [15] = "Correctable Resilver- or Spare-Copy Data ECC", [14] = "Correctable Mirrored Demand Data ECC", [13] = "Correctable Non-Mirrored Demand Data ECC", [11] = "Memory or FBD configuration CRC read error", [10] = "FBD Configuration Write 
error on first attempt", [9] = "Memory Write error on first attempt", [8] = "Non-Aliased Uncorrectable Patrol Data ECC", [7] = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", [6] = "Non-Aliased Uncorrectable Mirrored Demand Data ECC", [5] = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC", [4] = "Aliased Uncorrectable Patrol Data ECC", [3] = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC", [2] = "Aliased Uncorrectable Mirrored Demand Data ECC", [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", [0] = "Uncorrectable Data ECC on Replay", }; #define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3) #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\ (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\ (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\ (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\ (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\ (1 << 1) | (1 << 0)) #define EMASK_FBD 0xa8 #define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\ (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\ (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\ (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\ (1 << 9) | (1 << 8) | (1 << 7) | (1 << 6) |\ (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) |\ (1 << 1) | (1 << 0)) /* * Device 16.2: Global Error Registers */ #define FERR_GLOBAL_HI 0x48 static const char *ferr_global_hi_name[] = { [3] = "FSB 3 Fatal Error", [2] = "FSB 2 Fatal Error", [1] = "FSB 1 Fatal Error", [0] = "FSB 0 Fatal Error", }; #define ferr_global_hi_is_fatal(errno) 1 #define FERR_GLOBAL_LO 0x40 static const char *ferr_global_lo_name[] = { [31] = "Internal MCH Fatal Error", [30] = "Intel QuickData Technology Device Fatal Error", [29] = "FSB1 Fatal Error", [28] = "FSB0 Fatal Error", [27] = "FBD Channel 3 Fatal Error", [26] = "FBD Channel 2 Fatal Error", [25] = "FBD Channel 1 Fatal Error", [24] = "FBD Channel 0 Fatal Error", [23] = "PCI Express Device 7Fatal Error", [22] = "PCI Express Device 6 Fatal 
Error", [21] = "PCI Express Device 5 Fatal Error", [20] = "PCI Express Device 4 Fatal Error", [19] = "PCI Express Device 3 Fatal Error", [18] = "PCI Express Device 2 Fatal Error", [17] = "PCI Express Device 1 Fatal Error", [16] = "ESI Fatal Error", [15] = "Internal MCH Non-Fatal Error", [14] = "Intel QuickData Technology Device Non Fatal Error", [13] = "FSB1 Non-Fatal Error", [12] = "FSB 0 Non-Fatal Error", [11] = "FBD Channel 3 Non-Fatal Error", [10] = "FBD Channel 2 Non-Fatal Error", [9] = "FBD Channel 1 Non-Fatal Error", [8] = "FBD Channel 0 Non-Fatal Error", [7] = "PCI Express Device 7 Non-Fatal Error", [6] = "PCI Express Device 6 Non-Fatal Error", [5] = "PCI Express Device 5 Non-Fatal Error", [4] = "PCI Express Device 4 Non-Fatal Error", [3] = "PCI Express Device 3 Non-Fatal Error", [2] = "PCI Express Device 2 Non-Fatal Error", [1] = "PCI Express Device 1 Non-Fatal Error", [0] = "ESI Non-Fatal Error", }; #define ferr_global_lo_is_fatal(errno) ((errno < 16) ? 0 : 1) #define NRECMEMA 0xbe #define NRECMEMA_BANK(v) (((v) >> 12) & 7) #define NRECMEMA_RANK(v) (((v) >> 8) & 15) #define NRECMEMB 0xc0 #define NRECMEMB_IS_WR(v) ((v) & (1 << 31)) #define NRECMEMB_CAS(v) (((v) >> 16) & 0x1fff) #define NRECMEMB_RAS(v) ((v) & 0xffff) #define REDMEMA 0xdc #define REDMEMB 0x7c #define IS_SECOND_CH(v) ((v) * (1 << 17)) #define RECMEMA 0xe0 #define RECMEMA_BANK(v) (((v) >> 12) & 7) #define RECMEMA_RANK(v) (((v) >> 8) & 15) #define RECMEMB 0xe4 #define RECMEMB_IS_WR(v) ((v) & (1 << 31)) #define RECMEMB_CAS(v) (((v) >> 16) & 0x1fff) #define RECMEMB_RAS(v) ((v) & 0xffff) /******************************************** * i7300 Functions related to error detection ********************************************/ /** * get_err_from_table() - Gets the error message from a table * @table: table name (array of char *) * @size: number of elements at the table * @pos: position of the element to be returned * * This is a small routine that gets the pos-th element of a table. 
If the * element doesn't exist (or it is empty), it returns "reserved". * Instead of calling it directly, the better is to call via the macro * GET_ERR_FROM_TABLE(), that automatically checks the table size via * ARRAY_SIZE() macro */ static const char *get_err_from_table(const char *table[], int size, int pos) { if (unlikely(pos >= size)) return "Reserved"; if (unlikely(!table[pos])) return "Reserved"; return table[pos]; } #define GET_ERR_FROM_TABLE(table, pos) \ get_err_from_table(table, ARRAY_SIZE(table), pos) /** * i7300_process_error_global() - Retrieve the hardware error information from * the hardware global error registers and * sends it to dmesg * @mci: struct mem_ctl_info pointer */ static void i7300_process_error_global(struct mem_ctl_info *mci) { struct i7300_pvt *pvt; u32 errnum, error_reg; unsigned long errors; const char *specific; bool is_fatal; pvt = mci->pvt_info; /* read in the 1st FATAL error register */ pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, FERR_GLOBAL_HI, &error_reg); if (unlikely(error_reg)) { errors = error_reg; errnum = find_first_bit(&errors, ARRAY_SIZE(ferr_global_hi_name)); specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum); is_fatal = ferr_global_hi_is_fatal(errnum); /* Clear the error bit */ pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, FERR_GLOBAL_HI, error_reg); goto error_global; } pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, FERR_GLOBAL_LO, &error_reg); if (unlikely(error_reg)) { errors = error_reg; errnum = find_first_bit(&errors, ARRAY_SIZE(ferr_global_lo_name)); specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum); is_fatal = ferr_global_lo_is_fatal(errnum); /* Clear the error bit */ pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, FERR_GLOBAL_LO, error_reg); goto error_global; } return; error_global: i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n", is_fatal ? 
"Fatal" : "NOT fatal", specific); } /** * i7300_process_fbd_error() - Retrieve the hardware error information from * the FBD error registers and sends it via * EDAC error API calls * @mci: struct mem_ctl_info pointer */ static void i7300_process_fbd_error(struct mem_ctl_info *mci) { struct i7300_pvt *pvt; u32 errnum, value, error_reg; u16 val16; unsigned branch, channel, bank, rank, cas, ras; u32 syndrome; unsigned long errors; const char *specific; bool is_wr; pvt = mci->pvt_info; /* read in the 1st FATAL error register */ pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, FERR_FAT_FBD, &error_reg); if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) { errors = error_reg & FERR_FAT_FBD_ERR_MASK ; errnum = find_first_bit(&errors, ARRAY_SIZE(ferr_fat_fbd_name)); specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum); branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0; pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, NRECMEMA, &val16); bank = NRECMEMA_BANK(val16); rank = NRECMEMA_RANK(val16); pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, NRECMEMB, &value); is_wr = NRECMEMB_IS_WR(value); cas = NRECMEMB_CAS(value); ras = NRECMEMB_RAS(value); /* Clean the error register */ pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, FERR_FAT_FBD, error_reg); snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))", bank, ras, cas, errors, specific); edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0, branch, -1, rank, is_wr ? "Write error" : "Read error", pvt->tmp_prt_buffer); } /* read in the 1st NON-FATAL error register */ pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, FERR_NF_FBD, &error_reg); if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) { errors = error_reg & FERR_NF_FBD_ERR_MASK; errnum = find_first_bit(&errors, ARRAY_SIZE(ferr_nf_fbd_name)); specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum); branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 
1 : 0; pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, REDMEMA, &syndrome); pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, RECMEMA, &val16); bank = RECMEMA_BANK(val16); rank = RECMEMA_RANK(val16); pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, RECMEMB, &value); is_wr = RECMEMB_IS_WR(value); cas = RECMEMB_CAS(value); ras = RECMEMB_RAS(value); pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, REDMEMB, &value); channel = (branch << 1); if (IS_SECOND_CH(value)) channel++; /* Clear the error bit */ pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, FERR_NF_FBD, error_reg); /* Form out message */ snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))", bank, ras, cas, errors, specific); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, syndrome, branch >> 1, channel % 2, rank, is_wr ? "Write error" : "Read error", pvt->tmp_prt_buffer); } return; } /** * i7300_check_error() - Calls the error checking subroutines * @mci: struct mem_ctl_info pointer */ static void i7300_check_error(struct mem_ctl_info *mci) { i7300_process_error_global(mci); i7300_process_fbd_error(mci); }; /** * i7300_clear_error() - Clears the error registers * @mci: struct mem_ctl_info pointer */ static void i7300_clear_error(struct mem_ctl_info *mci) { struct i7300_pvt *pvt = mci->pvt_info; u32 value; /* * All error values are RWC - we need to read and write 1 to the * bit that we want to cleanup */ /* Clear global error registers */ pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, FERR_GLOBAL_HI, &value); pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, FERR_GLOBAL_HI, value); pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs, FERR_GLOBAL_LO, &value); pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs, FERR_GLOBAL_LO, value); /* Clear FBD error registers */ pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, FERR_FAT_FBD, &value); pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, FERR_FAT_FBD, value); 
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, FERR_NF_FBD, &value); pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, FERR_NF_FBD, value); } /** * i7300_enable_error_reporting() - Enable the memory reporting logic at the * hardware * @mci: struct mem_ctl_info pointer */ static void i7300_enable_error_reporting(struct mem_ctl_info *mci) { struct i7300_pvt *pvt = mci->pvt_info; u32 fbd_error_mask; /* Read the FBD Error Mask Register */ pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, EMASK_FBD, &fbd_error_mask); /* Enable with a '0' */ fbd_error_mask &= ~(EMASK_FBD_ERR_MASK); pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map, EMASK_FBD, fbd_error_mask); } /************************************************ * i7300 Functions related to memory enumberation ************************************************/ /** * decode_mtr() - Decodes the MTR descriptor, filling the edac structs * @pvt: pointer to the private data struct used by i7300 driver * @slot: DIMM slot (0 to 7) * @ch: Channel number within the branch (0 or 1) * @branch: Branch number (0 or 1) * @dinfo: Pointer to DIMM info where dimm size is stored * @p_csrow: Pointer to the struct csrow_info that corresponds to that element */ static int decode_mtr(struct i7300_pvt *pvt, int slot, int ch, int branch, struct i7300_dimm_info *dinfo, struct dimm_info *dimm) { int mtr, ans, addrBits, channel; channel = to_channel(ch, branch); mtr = pvt->mtr[slot][branch]; ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0; edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n", slot, channel, ans ? 
"" : "NOT "); /* Determine if there is a DIMM present in this DIMM slot */ if (!ans) return 0; /* Start with the number of bits for a Bank * on the DRAM */ addrBits = MTR_DRAM_BANKS_ADDR_BITS; /* Add thenumber of ROW bits */ addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); /* add the number of COLUMN bits */ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); /* add the number of RANK bits */ addrBits += MTR_DIMM_RANKS(mtr); addrBits += 6; /* add 64 bits per DIMM */ addrBits -= 20; /* divide by 2^^20 */ addrBits -= 3; /* 8 bits per bytes */ dinfo->megabytes = 1 << addrBits; edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n", MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); edac_dbg(2, "\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single"); edac_dbg(2, "\t\tNUMROW: %s\n", MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" : MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" : MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" : "65,536 - 16 rows"); edac_dbg(2, "\t\tNUMCOL: %s\n", MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" : MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" : MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" : "reserved"); edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes); /* * The type of error detection actually depends of the * mode of operation. When it is just one single memory chip, at * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code. 
* In normal or mirrored mode, it uses Lockstep mode, * with the possibility of using an extended algorithm for x8 memories * See datasheet Sections 7.3.6 to 7.3.8 */ dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes); dimm->grain = 8; dimm->mtype = MEM_FB_DDR2; if (IS_SINGLE_MODE(pvt->mc_settings_a)) { dimm->edac_mode = EDAC_SECDED; edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); } else { edac_dbg(2, "\t\tECC code is on Lockstep mode\n"); if (MTR_DRAM_WIDTH(mtr) == 8) dimm->edac_mode = EDAC_S8ECD8ED; else dimm->edac_mode = EDAC_S4ECD4ED; } /* ask what device type on this row */ if (MTR_DRAM_WIDTH(mtr) == 8) { edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n", IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? "enhanced" : "normal"); dimm->dtype = DEV_X8; } else dimm->dtype = DEV_X4; return mtr; } /** * print_dimm_size() - Prints dump of the memory organization * @pvt: pointer to the private data struct used by i7300 driver * * Useful for debug. If debug is disabled, this routine do nothing */ static void print_dimm_size(struct i7300_pvt *pvt) { #ifdef CONFIG_EDAC_DEBUG struct i7300_dimm_info *dinfo; char *p; int space, n; int channel, slot; space = PAGE_SIZE; p = pvt->tmp_prt_buffer; n = snprintf(p, space, " "); p += n; space -= n; for (channel = 0; channel < MAX_CHANNELS; channel++) { n = snprintf(p, space, "channel %d | ", channel); p += n; space -= n; } edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); p = pvt->tmp_prt_buffer; space = PAGE_SIZE; n = snprintf(p, space, "-------------------------------" "------------------------------"); p += n; space -= n; edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); p = pvt->tmp_prt_buffer; space = PAGE_SIZE; for (slot = 0; slot < MAX_SLOTS; slot++) { n = snprintf(p, space, "csrow/SLOT %d ", slot); p += n; space -= n; for (channel = 0; channel < MAX_CHANNELS; channel++) { dinfo = &pvt->dimm_info[slot][channel]; n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); p += n; space -= n; } edac_dbg(2, "%s\n", 
pvt->tmp_prt_buffer); p = pvt->tmp_prt_buffer; space = PAGE_SIZE; } n = snprintf(p, space, "-------------------------------" "------------------------------"); p += n; space -= n; edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); p = pvt->tmp_prt_buffer; space = PAGE_SIZE; #endif } /** * i7300_init_csrows() - Initialize the 'csrows' table within * the mci control structure with the * addressing of memory. * @mci: struct mem_ctl_info pointer */ static int i7300_init_csrows(struct mem_ctl_info *mci) { struct i7300_pvt *pvt; struct i7300_dimm_info *dinfo; int rc = -ENODEV; int mtr; int ch, branch, slot, channel, max_channel, max_branch; struct dimm_info *dimm; pvt = mci->pvt_info; edac_dbg(2, "Memory Technology Registers:\n"); if (IS_SINGLE_MODE(pvt->mc_settings_a)) { max_branch = 1; max_channel = 1; } else { max_branch = MAX_BRANCHES; max_channel = MAX_CH_PER_BRANCH; } /* Get the AMB present registers for the four channels */ for (branch = 0; branch < max_branch; branch++) { /* Read and dump branch 0's MTRs */ channel = to_channel(0, branch); pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], AMBPRESENT_0, &pvt->ambpresent[channel]); edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n", channel, pvt->ambpresent[channel]); if (max_channel == 1) continue; channel = to_channel(1, branch); pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], AMBPRESENT_1, &pvt->ambpresent[channel]); edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n", channel, pvt->ambpresent[channel]); } /* Get the set of MTR[0-7] regs by each branch */ for (slot = 0; slot < MAX_SLOTS; slot++) { int where = mtr_regs[slot]; for (branch = 0; branch < max_branch; branch++) { pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], where, &pvt->mtr[slot][branch]); for (ch = 0; ch < max_channel; ch++) { int channel = to_channel(ch, branch); dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, branch, ch, slot); dinfo = &pvt->dimm_info[slot][channel]; mtr = decode_mtr(pvt, slot, ch, branch, dinfo, dimm); /* 
if no DIMMS on this row, continue */ if (!MTR_DIMMS_PRESENT(mtr)) continue; rc = 0; } } } return rc; } /** * decode_mir() - Decodes Memory Interleave Register (MIR) info * @int mir_no: number of the MIR register to decode * @mir: array with the MIR data cached on the driver */ static void decode_mir(int mir_no, u16 mir[MAX_MIR]) { if (mir[mir_no] & 3) edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n", mir_no, (mir[mir_no] >> 4) & 0xfff, (mir[mir_no] & 1) ? "B0" : "", (mir[mir_no] & 2) ? "B1" : ""); } /** * i7300_get_mc_regs() - Get the contents of the MC enumeration registers * @mci: struct mem_ctl_info pointer * * Data read is cached internally for its usage when needed */ static int i7300_get_mc_regs(struct mem_ctl_info *mci) { struct i7300_pvt *pvt; u32 actual_tolm; int i, rc; pvt = mci->pvt_info; pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE, (u32 *) &pvt->ambase); edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); /* Get the Branch Map regs */ pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm); pvt->tolm >>= 12; edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, pvt->tolm); actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n", actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); /* Get memory controller settings */ pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS, &pvt->mc_settings); pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A, &pvt->mc_settings_a); if (IS_SINGLE_MODE(pvt->mc_settings_a)) edac_dbg(0, "Memory controller operating on single mode\n"); else edac_dbg(0, "Memory controller operating on %smirrored mode\n", IS_MIRRORED(pvt->mc_settings) ? "" : "non-"); edac_dbg(0, "Error detection is %s\n", IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); edac_dbg(0, "Retry is %s\n", IS_RETRY_ENABLED(pvt->mc_settings) ? 
"enabled" : "disabled"); /* Get Memory Interleave Range registers */ pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, &pvt->mir[0]); pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1, &pvt->mir[1]); pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2, &pvt->mir[2]); /* Decode the MIR regs */ for (i = 0; i < MAX_MIR; i++) decode_mir(i, pvt->mir); rc = i7300_init_csrows(mci); if (rc < 0) return rc; /* Go and determine the size of each DIMM and place in an * orderly matrix */ print_dimm_size(pvt); return 0; } /************************************************* * i7300 Functions related to device probe/release *************************************************/ /** * i7300_put_devices() - Release the PCI devices * @mci: struct mem_ctl_info pointer */ static void i7300_put_devices(struct mem_ctl_info *mci) { struct i7300_pvt *pvt; int branch; pvt = mci->pvt_info; /* Decrement usage count for devices */ for (branch = 0; branch < MAX_CH_PER_BRANCH; branch++) pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]); pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs); pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map); } /** * i7300_get_devices() - Find and perform 'get' operation on the MCH's * device/functions we want to reference for this driver * @mci: struct mem_ctl_info pointer * * Access and prepare the several devices for usage: * I7300 devices used by this driver: * Device 16, functions 0,1 and 2: PCI_DEVICE_ID_INTEL_I7300_MCH_ERR * Device 21 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 * Device 22 function 0: PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 */ static int i7300_get_devices(struct mem_ctl_info *mci) { struct i7300_pvt *pvt; struct pci_dev *pdev; pvt = mci->pvt_info; /* Attempt to 'get' the MCH register we want */ pdev = NULL; while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev))) { /* Store device 16 funcs 1 and 2 */ switch (PCI_FUNC(pdev->devfn)) { case 1: if (!pvt->pci_dev_16_1_fsb_addr_map) pvt->pci_dev_16_1_fsb_addr_map 
= pci_dev_get(pdev); break; case 2: if (!pvt->pci_dev_16_2_fsb_err_regs) pvt->pci_dev_16_2_fsb_err_regs = pci_dev_get(pdev); break; } } if (!pvt->pci_dev_16_1_fsb_addr_map || !pvt->pci_dev_16_2_fsb_err_regs) { /* At least one device was not found */ i7300_printk(KERN_ERR, "'system address,Process Bus' device not found:" "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR); goto error; } edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", pci_name(pvt->pci_dev_16_0_fsb_ctlr), pvt->pci_dev_16_0_fsb_ctlr->vendor, pvt->pci_dev_16_0_fsb_ctlr->device); edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", pci_name(pvt->pci_dev_16_1_fsb_addr_map), pvt->pci_dev_16_1_fsb_addr_map->vendor, pvt->pci_dev_16_1_fsb_addr_map->device); edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n", pci_name(pvt->pci_dev_16_2_fsb_err_regs), pvt->pci_dev_16_2_fsb_err_regs->vendor, pvt->pci_dev_16_2_fsb_err_regs->device); pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, NULL); if (!pvt->pci_dev_2x_0_fbd_branch[0]) { i7300_printk(KERN_ERR, "MC: 'BRANCH 0' device not found:" "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0); goto error; } pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB1, NULL); if (!pvt->pci_dev_2x_0_fbd_branch[1]) { i7300_printk(KERN_ERR, "MC: 'BRANCH 1' device not found:" "vendor 0x%x device 0x%x Func 0 " "(broken BIOS?)\n", PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB1); goto error; } return 0; error: i7300_put_devices(mci); return -ENODEV; } /** * i7300_init_one() - Probe for one instance of the device * @pdev: struct pci_dev pointer * @id: struct pci_device_id pointer - currently unused */ static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mem_ctl_info *mci; struct 
edac_mc_layer layers[3]; struct i7300_pvt *pvt; int rc; /* wake up device */ rc = pci_enable_device(pdev); if (rc == -EIO) return rc; edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); /* We only are looking for func 0 of the set */ if (PCI_FUNC(pdev->devfn) != 0) return -ENODEV; /* allocate a new MC control structure */ layers[0].type = EDAC_MC_LAYER_BRANCH; layers[0].size = MAX_BRANCHES; layers[0].is_virt_csrow = false; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = MAX_CH_PER_BRANCH; layers[1].is_virt_csrow = true; layers[2].type = EDAC_MC_LAYER_SLOT; layers[2].size = MAX_SLOTS; layers[2].is_virt_csrow = true; mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); if (mci == NULL) return -ENOMEM; edac_dbg(0, "MC: mci = %p\n", mci); mci->pdev = &pdev->dev; /* record ptr to the generic device */ pvt = mci->pvt_info; pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */ pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!pvt->tmp_prt_buffer) { edac_mc_free(mci); return -ENOMEM; } /* 'get' the pci devices we want to reserve for our use */ if (i7300_get_devices(mci)) goto fail0; mci->mc_idx = 0; mci->mtype_cap = MEM_FLAG_FB_DDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; mci->mod_name = "i7300_edac.c"; mci->mod_ver = I7300_REVISION; mci->ctl_name = i7300_devs[0].ctl_name; mci->dev_name = pci_name(pdev); mci->ctl_page_to_phys = NULL; /* Set the function pointer to an actual operation function */ mci->edac_check = i7300_check_error; /* initialize the MC control structure 'csrows' table * with the mapping and control information */ if (i7300_get_mc_regs(mci)) { edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n"); mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ } else { edac_dbg(1, "MC: Enable error reporting now\n"); i7300_enable_error_reporting(mci); } /* add this 
new MC control structure to EDAC's list of MCs */ if (edac_mc_add_mc(mci)) { edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ goto fail1; } i7300_clear_error(mci); /* allocating generic PCI control info */ i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); if (!i7300_pci) { printk(KERN_WARNING "%s(): Unable to create PCI control\n", __func__); printk(KERN_WARNING "%s(): PCI error report via EDAC not setup\n", __func__); } return 0; /* Error exit unwinding stack */ fail1: i7300_put_devices(mci); fail0: kfree(pvt->tmp_prt_buffer); edac_mc_free(mci); return -ENODEV; } /** * i7300_remove_one() - Remove the driver * @pdev: struct pci_dev pointer */ static void i7300_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; char *tmp; edac_dbg(0, "\n"); if (i7300_pci) edac_pci_release_generic_ctl(i7300_pci); mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer; /* retrieve references to resources, and free those resources */ i7300_put_devices(mci); kfree(tmp); edac_mc_free(mci); } /* * pci_device_id: table for which devices we are looking for * * Has only 8086:360c PCI ID */ static DEFINE_PCI_DEVICE_TABLE(i7300_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, {0,} /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, i7300_pci_tbl); /* * i7300_driver: pci_driver structure for this module */ static struct pci_driver i7300_driver = { .name = "i7300_edac", .probe = i7300_init_one, .remove = i7300_remove_one, .id_table = i7300_pci_tbl, }; /** * i7300_init() - Registers the driver */ static int __init i7300_init(void) { int pci_rc; edac_dbg(2, "\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); pci_rc = pci_register_driver(&i7300_driver); return (pci_rc < 0) ? 
pci_rc : 0; } /** * i7300_init() - Unregisters the driver */ static void __exit i7300_exit(void) { edac_dbg(2, "\n"); pci_unregister_driver(&i7300_driver); } module_init(i7300_init); module_exit(i7300_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - " I7300_REVISION); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
MattCrystal/HTC-One---4.4-Linaro
drivers/cpufreq/exynos-cpufreq.c
4510
7678
/* * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * EXYNOS - CPU frequency scaling support for EXYNOS series * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> #include <linux/cpufreq.h> #include <linux/suspend.h> #include <mach/cpufreq.h> #include <plat/cpu.h> static struct exynos_dvfs_info *exynos_info; static struct regulator *arm_regulator; static struct cpufreq_freqs freqs; static unsigned int locking_frequency; static bool frequency_locked; static DEFINE_MUTEX(cpufreq_lock); int exynos_verify_speed(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, exynos_info->freq_table); } unsigned int exynos_getspeed(unsigned int cpu) { return clk_get_rate(exynos_info->cpu_clk) / 1000; } static int exynos_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int index, old_index; unsigned int arm_volt, safe_arm_volt = 0; int ret = 0; struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; unsigned int *volt_table = exynos_info->volt_table; unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz; mutex_lock(&cpufreq_lock); freqs.old = policy->cur; if (frequency_locked && target_freq != locking_frequency) { ret = -EAGAIN; goto out; } if (cpufreq_frequency_table_target(policy, freq_table, freqs.old, relation, &old_index)) { ret = -EINVAL; goto out; } if (cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &index)) { ret = -EINVAL; goto out; } freqs.new = freq_table[index].frequency; freqs.cpu = policy->cpu; /* * ARM clock source will be changed APLL to MPLL temporary * To support this level, need to control regulator for * required voltage level 
*/ if (exynos_info->need_apll_change != NULL) { if (exynos_info->need_apll_change(old_index, index) && (freq_table[index].frequency < mpll_freq_khz) && (freq_table[old_index].frequency < mpll_freq_khz)) safe_arm_volt = volt_table[exynos_info->pll_safe_idx]; } arm_volt = volt_table[index]; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); /* When the new frequency is higher than current frequency */ if ((freqs.new > freqs.old) && !safe_arm_volt) { /* Firstly, voltage up to increase frequency */ regulator_set_voltage(arm_regulator, arm_volt, arm_volt); } if (safe_arm_volt) regulator_set_voltage(arm_regulator, safe_arm_volt, safe_arm_volt); if (freqs.new != freqs.old) exynos_info->set_freq(old_index, index); cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); /* When the new frequency is lower than current frequency */ if ((freqs.new < freqs.old) || ((freqs.new > freqs.old) && safe_arm_volt)) { /* down the voltage after frequency change */ regulator_set_voltage(arm_regulator, arm_volt, arm_volt); } out: mutex_unlock(&cpufreq_lock); return ret; } #ifdef CONFIG_PM static int exynos_cpufreq_suspend(struct cpufreq_policy *policy) { return 0; } static int exynos_cpufreq_resume(struct cpufreq_policy *policy) { return 0; } #endif /** * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume * context * @notifier * @pm_event * @v * * While frequency_locked == true, target() ignores every frequency but * locking_frequency. The locking_frequency value is the initial frequency, * which is set by the bootloader. In order to eliminate possible * inconsistency in clock values, we save and restore frequencies during * suspend and resume and block CPUFREQ activities. Note that the standard * suspend/resume cannot be used as they are too deep (syscore_ops) for * regulator actions. 
*/ static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier, unsigned long pm_event, void *v) { struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */ static unsigned int saved_frequency; unsigned int temp; mutex_lock(&cpufreq_lock); switch (pm_event) { case PM_SUSPEND_PREPARE: if (frequency_locked) goto out; frequency_locked = true; if (locking_frequency) { saved_frequency = exynos_getspeed(0); mutex_unlock(&cpufreq_lock); exynos_target(policy, locking_frequency, CPUFREQ_RELATION_H); mutex_lock(&cpufreq_lock); } break; case PM_POST_SUSPEND: if (saved_frequency) { /* * While frequency_locked, only locking_frequency * is valid for target(). In order to use * saved_frequency while keeping frequency_locked, * we temporarly overwrite locking_frequency. */ temp = locking_frequency; locking_frequency = saved_frequency; mutex_unlock(&cpufreq_lock); exynos_target(policy, locking_frequency, CPUFREQ_RELATION_H); mutex_lock(&cpufreq_lock); locking_frequency = temp; } frequency_locked = false; break; } out: mutex_unlock(&cpufreq_lock); return NOTIFY_OK; } static struct notifier_block exynos_cpufreq_nb = { .notifier_call = exynos_cpufreq_pm_notifier, }; static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) { policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu); cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu); locking_frequency = exynos_getspeed(0); /* set the transition latency value */ policy->cpuinfo.transition_latency = 100000; /* * EXYNOS4 multi-core processors has 2 cores * that the frequency cannot be set independently. * Each cpu is bound to the same speed. * So the affected cpu is all of the cpus. 
*/ if (num_online_cpus() == 1) { cpumask_copy(policy->related_cpus, cpu_possible_mask); cpumask_copy(policy->cpus, cpu_online_mask); } else { cpumask_setall(policy->cpus); } return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); } static struct cpufreq_driver exynos_driver = { .flags = CPUFREQ_STICKY, .verify = exynos_verify_speed, .target = exynos_target, .get = exynos_getspeed, .init = exynos_cpufreq_cpu_init, .name = "exynos_cpufreq", #ifdef CONFIG_PM .suspend = exynos_cpufreq_suspend, .resume = exynos_cpufreq_resume, #endif }; static int __init exynos_cpufreq_init(void) { int ret = -EINVAL; exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL); if (!exynos_info) return -ENOMEM; if (soc_is_exynos4210()) ret = exynos4210_cpufreq_init(exynos_info); else if (soc_is_exynos4212() || soc_is_exynos4412()) ret = exynos4x12_cpufreq_init(exynos_info); else if (soc_is_exynos5250()) ret = exynos5250_cpufreq_init(exynos_info); else pr_err("%s: CPU type not found\n", __func__); if (ret) goto err_vdd_arm; if (exynos_info->set_freq == NULL) { pr_err("%s: No set_freq function (ERR)\n", __func__); goto err_vdd_arm; } arm_regulator = regulator_get(NULL, "vdd_arm"); if (IS_ERR(arm_regulator)) { pr_err("%s: failed to get resource vdd_arm\n", __func__); goto err_vdd_arm; } register_pm_notifier(&exynos_cpufreq_nb); if (cpufreq_register_driver(&exynos_driver)) { pr_err("%s: failed to register cpufreq driver\n", __func__); goto err_cpufreq; } return 0; err_cpufreq: unregister_pm_notifier(&exynos_cpufreq_nb); if (!IS_ERR(arm_regulator)) regulator_put(arm_regulator); err_vdd_arm: kfree(exynos_info); pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } late_initcall(exynos_cpufreq_init);
gpl-2.0
nazunamoe/Oxygen_united_kernel-gproj
arch/mips/math-emu/ieee754dp.c
7838
5377
/* IEEE754 floating point arithmetic * double precision: common utilities */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754dp.h" int ieee754dp_class(ieee754dp x) { COMPXDP; EXPLODEXDP; return xc; } int ieee754dp_isnan(ieee754dp x) { return ieee754dp_class(x) >= IEEE754_CLASS_SNAN; } int ieee754dp_issnan(ieee754dp x) { assert(ieee754dp_isnan(x)); return ((DPMANT(x) & DP_MBIT(DP_MBITS-1)) == DP_MBIT(DP_MBITS-1)); } ieee754dp ieee754dp_xcpt(ieee754dp r, const char *op, ...) { struct ieee754xctx ax; if (!TSTX()) return r; ax.op = op; ax.rt = IEEE754_RT_DP; ax.rv.dp = r; va_start(ax.ap, op); ieee754_xcpt(&ax); va_end(ax.ap); return ax.rv.dp; } ieee754dp ieee754dp_nanxcpt(ieee754dp r, const char *op, ...) { struct ieee754xctx ax; assert(ieee754dp_isnan(r)); if (!ieee754dp_issnan(r)) /* QNAN does not cause invalid op !! 
*/ return r; if (!SETANDTESTCX(IEEE754_INVALID_OPERATION)) { /* not enabled convert to a quiet NaN */ DPMANT(r) &= (~DP_MBIT(DP_MBITS-1)); if (ieee754dp_isnan(r)) return r; else return ieee754dp_indef(); } ax.op = op; ax.rt = 0; ax.rv.dp = r; va_start(ax.ap, op); ieee754_xcpt(&ax); va_end(ax.ap); return ax.rv.dp; } ieee754dp ieee754dp_bestnan(ieee754dp x, ieee754dp y) { assert(ieee754dp_isnan(x)); assert(ieee754dp_isnan(y)); if (DPMANT(x) > DPMANT(y)) return x; else return y; } static u64 get_rounding(int sn, u64 xm) { /* inexact must round of 3 bits */ if (xm & (DP_MBIT(3) - 1)) { switch (ieee754_csr.rm) { case IEEE754_RZ: break; case IEEE754_RN: xm += 0x3 + ((xm >> 3) & 1); /* xm += (xm&0x8)?0x4:0x3 */ break; case IEEE754_RU: /* toward +Infinity */ if (!sn) /* ?? */ xm += 0x8; break; case IEEE754_RD: /* toward -Infinity */ if (sn) /* ?? */ xm += 0x8; break; } } return xm; } /* generate a normal/denormal number with over,under handling * sn is sign * xe is an unbiased exponent * xm is 3bit extended precision value. 
*/ ieee754dp ieee754dp_format(int sn, int xe, u64 xm) { assert(xm); /* we don't gen exact zeros (probably should) */ assert((xm >> (DP_MBITS + 1 + 3)) == 0); /* no execess */ assert(xm & (DP_HIDDEN_BIT << 3)); if (xe < DP_EMIN) { /* strip lower bits */ int es = DP_EMIN - xe; if (ieee754_csr.nod) { SETCX(IEEE754_UNDERFLOW); SETCX(IEEE754_INEXACT); switch(ieee754_csr.rm) { case IEEE754_RN: case IEEE754_RZ: return ieee754dp_zero(sn); case IEEE754_RU: /* toward +Infinity */ if(sn == 0) return ieee754dp_min(0); else return ieee754dp_zero(1); case IEEE754_RD: /* toward -Infinity */ if(sn == 0) return ieee754dp_zero(0); else return ieee754dp_min(1); } } if (xe == DP_EMIN - 1 && get_rounding(sn, xm) >> (DP_MBITS + 1 + 3)) { /* Not tiny after rounding */ SETCX(IEEE754_INEXACT); xm = get_rounding(sn, xm); xm >>= 1; /* Clear grs bits */ xm &= ~(DP_MBIT(3) - 1); xe++; } else { /* sticky right shift es bits */ xm = XDPSRS(xm, es); xe += es; assert((xm & (DP_HIDDEN_BIT << 3)) == 0); assert(xe == DP_EMIN); } } if (xm & (DP_MBIT(3) - 1)) { SETCX(IEEE754_INEXACT); if ((xm & (DP_HIDDEN_BIT << 3)) == 0) { SETCX(IEEE754_UNDERFLOW); } /* inexact must round of 3 bits */ xm = get_rounding(sn, xm); /* adjust exponent for rounding add overflowing */ if (xm >> (DP_MBITS + 3 + 1)) { /* add causes mantissa overflow */ xm >>= 1; xe++; } } /* strip grs bits */ xm >>= 3; assert((xm >> (DP_MBITS + 1)) == 0); /* no execess */ assert(xe >= DP_EMIN); if (xe > DP_EMAX) { SETCX(IEEE754_OVERFLOW); SETCX(IEEE754_INEXACT); /* -O can be table indexed by (rm,sn) */ switch (ieee754_csr.rm) { case IEEE754_RN: return ieee754dp_inf(sn); case IEEE754_RZ: return ieee754dp_max(sn); case IEEE754_RU: /* toward +Infinity */ if (sn == 0) return ieee754dp_inf(0); else return ieee754dp_max(1); case IEEE754_RD: /* toward -Infinity */ if (sn == 0) return ieee754dp_max(0); else return ieee754dp_inf(1); } } /* gen norm/denorm/zero */ if ((xm & DP_HIDDEN_BIT) == 0) { /* we underflow (tiny/zero) */ assert(xe == DP_EMIN); if 
(ieee754_csr.mx & IEEE754_UNDERFLOW) SETCX(IEEE754_UNDERFLOW); return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm); } else { assert((xm >> (DP_MBITS + 1)) == 0); /* no execess */ assert(xm & DP_HIDDEN_BIT); return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); } }
gpl-2.0
dedzt16/dedzt16
arch/mn10300/lib/bitops.c
9118
1087
/* MN10300 Non-trivial bit operations * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <asm/bitops.h> /* * try flipping a bit using BSET and BCLR */ void change_bit(unsigned long nr, volatile void *addr) { if (test_bit(nr, addr)) goto try_clear_bit; try_set_bit: if (!test_and_set_bit(nr, addr)) return; try_clear_bit: if (test_and_clear_bit(nr, addr)) return; goto try_set_bit; } /* * try flipping a bit using BSET and BCLR and returning the old value */ int test_and_change_bit(unsigned long nr, volatile void *addr) { if (test_bit(nr, addr)) goto try_clear_bit; try_set_bit: if (!test_and_set_bit(nr, addr)) return 0; try_clear_bit: if (test_and_clear_bit(nr, addr)) return 1; goto try_set_bit; }
gpl-2.0
kello711/linux
arch/mips/cobalt/time.c
9886
1476
/* * Cobalt time initialization. * * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/i8253.h> #include <linux/init.h> #include <asm/gt64120.h> #include <asm/time.h> #define GT641XX_BASE_CLOCK 50000000 /* 50MHz */ void __init plat_time_init(void) { u32 start, end; int i = HZ / 10; setup_pit_timer(); gt641xx_set_base_clock(GT641XX_BASE_CLOCK); /* * MIPS counter frequency is measured during a 100msec interval * using GT64111 timer0. */ while (!gt641xx_timer0_state()) ; start = read_c0_count(); while (i--) while (!gt641xx_timer0_state()) ; end = read_c0_count(); mips_hpt_frequency = (end - start) * 10; printk(KERN_INFO "MIPS counter frequency %dHz\n", mips_hpt_frequency); }
gpl-2.0
coldnew/linux
fs/jfs/jfs_debug.c
14238
2823
/* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/ctype.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/uaccess.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_debug.h" #ifdef PROC_FS_JFS /* see jfs_debug.h */ static struct proc_dir_entry *base; #ifdef CONFIG_JFS_DEBUG static int jfs_loglevel_proc_show(struct seq_file *m, void *v) { seq_printf(m, "%d\n", jfsloglevel); return 0; } static int jfs_loglevel_proc_open(struct inode *inode, struct file *file) { return single_open(file, jfs_loglevel_proc_show, NULL); } static ssize_t jfs_loglevel_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { char c; if (get_user(c, buffer)) return -EFAULT; /* yes, I know this is an ASCIIism. 
--hch */ if (c < '0' || c > '9') return -EINVAL; jfsloglevel = c - '0'; return count; } static const struct file_operations jfs_loglevel_proc_fops = { .owner = THIS_MODULE, .open = jfs_loglevel_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = jfs_loglevel_proc_write, }; #endif static struct { const char *name; const struct file_operations *proc_fops; } Entries[] = { #ifdef CONFIG_JFS_STATISTICS { "lmstats", &jfs_lmstats_proc_fops, }, { "txstats", &jfs_txstats_proc_fops, }, { "xtstat", &jfs_xtstat_proc_fops, }, { "mpstat", &jfs_mpstat_proc_fops, }, #endif #ifdef CONFIG_JFS_DEBUG { "TxAnchor", &jfs_txanchor_proc_fops, }, { "loglevel", &jfs_loglevel_proc_fops } #endif }; #define NPROCENT ARRAY_SIZE(Entries) void jfs_proc_init(void) { int i; if (!(base = proc_mkdir("fs/jfs", NULL))) return; for (i = 0; i < NPROCENT; i++) proc_create(Entries[i].name, 0, base, Entries[i].proc_fops); } void jfs_proc_clean(void) { int i; if (base) { for (i = 0; i < NPROCENT; i++) remove_proc_entry(Entries[i].name, base); remove_proc_entry("fs/jfs", NULL); } } #endif /* PROC_FS_JFS */
gpl-2.0
tellapart/ubuntu-precise
drivers/power/power_supply_sysfs.c
159
8073
/* * Sysfs interface for the universal power supply monitor class * * Copyright © 2007 David Woodhouse <dwmw2@infradead.org> * Copyright © 2007 Anton Vorontsov <cbou@mail.ru> * Copyright © 2004 Szabolcs Gyurko * Copyright © 2003 Ian Molton <spyro@f2s.com> * * Modified: 2004, Oct Szabolcs Gyurko * * You may use this code as per GPL version 2 */ #include <linux/ctype.h> #include <linux/power_supply.h> #include <linux/slab.h> #include <linux/stat.h> #include "power_supply.h" /* * This is because the name "current" breaks the device attr macro. * The "current" word resolves to "(get_current())" so instead of * "current" "(get_current())" appears in the sysfs. * * The source of this definition is the device.h which calls __ATTR * macro in sysfs.h which calls the __stringify macro. * * Only modification that the name is not tried to be resolved * (as a macro let's say). */ #define POWER_SUPPLY_ATTR(_name) \ { \ .attr = { .name = #_name }, \ .show = power_supply_show_property, \ .store = power_supply_store_property, \ } static struct device_attribute power_supply_attrs[]; static ssize_t power_supply_show_property(struct device *dev, struct device_attribute *attr, char *buf) { static char *type_text[] = { "Battery", "UPS", "Mains", "USB", "USB_DCP", "USB_CDP", "USB_ACA" }; static char *status_text[] = { "Unknown", "Charging", "Discharging", "Not charging", "Full" }; static char *charge_type[] = { "Unknown", "N/A", "Trickle", "Fast" }; static char *health_text[] = { "Unknown", "Good", "Overheat", "Dead", "Over voltage", "Unspecified failure", "Cold", }; static char *technology_text[] = { "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd", "LiMn" }; static char *capacity_level_text[] = { "Unknown", "Critical", "Low", "Normal", "High", "Full" }; ssize_t ret = 0; struct power_supply *psy = dev_get_drvdata(dev); const ptrdiff_t off = attr - power_supply_attrs; union power_supply_propval value; if (off == POWER_SUPPLY_PROP_TYPE) value.intval = psy->type; else ret = 
psy->get_property(psy, off, &value); if (ret < 0) { if (ret == -ENODATA) dev_dbg(dev, "driver has no data for `%s' property\n", attr->attr.name); else if (ret != -ENODEV) dev_err(dev, "driver failed to report `%s' property\n", attr->attr.name); return ret; } if (off == POWER_SUPPLY_PROP_STATUS) return sprintf(buf, "%s\n", status_text[value.intval]); else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE) return sprintf(buf, "%s\n", charge_type[value.intval]); else if (off == POWER_SUPPLY_PROP_HEALTH) return sprintf(buf, "%s\n", health_text[value.intval]); else if (off == POWER_SUPPLY_PROP_TECHNOLOGY) return sprintf(buf, "%s\n", technology_text[value.intval]); else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL) return sprintf(buf, "%s\n", capacity_level_text[value.intval]); else if (off == POWER_SUPPLY_PROP_TYPE) return sprintf(buf, "%s\n", type_text[value.intval]); else if (off >= POWER_SUPPLY_PROP_MODEL_NAME) return sprintf(buf, "%s\n", value.strval); return sprintf(buf, "%d\n", value.intval); } static ssize_t power_supply_store_property(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); const ptrdiff_t off = attr - power_supply_attrs; union power_supply_propval value; long long_val; /* TODO: support other types than int */ ret = strict_strtol(buf, 10, &long_val); if (ret < 0) return ret; value.intval = long_val; ret = psy->set_property(psy, off, &value); if (ret < 0) return ret; return count; } /* Must be in the same order as POWER_SUPPLY_PROP_* */ static struct device_attribute power_supply_attrs[] = { /* Properties of type `int' */ POWER_SUPPLY_ATTR(status), POWER_SUPPLY_ATTR(charge_type), POWER_SUPPLY_ATTR(health), POWER_SUPPLY_ATTR(present), POWER_SUPPLY_ATTR(online), POWER_SUPPLY_ATTR(technology), POWER_SUPPLY_ATTR(cycle_count), POWER_SUPPLY_ATTR(voltage_max), POWER_SUPPLY_ATTR(voltage_min), POWER_SUPPLY_ATTR(voltage_max_design), POWER_SUPPLY_ATTR(voltage_min_design), 
POWER_SUPPLY_ATTR(voltage_now), POWER_SUPPLY_ATTR(voltage_avg), POWER_SUPPLY_ATTR(current_max), POWER_SUPPLY_ATTR(current_now), POWER_SUPPLY_ATTR(current_avg), POWER_SUPPLY_ATTR(power_now), POWER_SUPPLY_ATTR(power_avg), POWER_SUPPLY_ATTR(charge_full_design), POWER_SUPPLY_ATTR(charge_empty_design), POWER_SUPPLY_ATTR(charge_full), POWER_SUPPLY_ATTR(charge_empty), POWER_SUPPLY_ATTR(charge_now), POWER_SUPPLY_ATTR(charge_avg), POWER_SUPPLY_ATTR(charge_counter), POWER_SUPPLY_ATTR(energy_full_design), POWER_SUPPLY_ATTR(energy_empty_design), POWER_SUPPLY_ATTR(energy_full), POWER_SUPPLY_ATTR(energy_empty), POWER_SUPPLY_ATTR(energy_now), POWER_SUPPLY_ATTR(energy_avg), POWER_SUPPLY_ATTR(capacity), POWER_SUPPLY_ATTR(capacity_level), POWER_SUPPLY_ATTR(temp), POWER_SUPPLY_ATTR(temp_ambient), POWER_SUPPLY_ATTR(time_to_empty_now), POWER_SUPPLY_ATTR(time_to_empty_avg), POWER_SUPPLY_ATTR(time_to_full_now), POWER_SUPPLY_ATTR(time_to_full_avg), POWER_SUPPLY_ATTR(type), /* Properties of type `const char *' */ POWER_SUPPLY_ATTR(model_name), POWER_SUPPLY_ATTR(manufacturer), POWER_SUPPLY_ATTR(serial_number), }; static struct attribute * __power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1]; static mode_t power_supply_attr_is_visible(struct kobject *kobj, struct attribute *attr, int attrno) { struct device *dev = container_of(kobj, struct device, kobj); struct power_supply *psy = dev_get_drvdata(dev); mode_t mode = S_IRUSR | S_IRGRP | S_IROTH; int i; if (attrno == POWER_SUPPLY_PROP_TYPE) return mode; for (i = 0; i < psy->num_properties; i++) { int property = psy->properties[i]; if (property == attrno) { if (psy->property_is_writeable && psy->property_is_writeable(psy, property) > 0) mode |= S_IWUSR; return mode; } } return 0; } static struct attribute_group power_supply_attr_group = { .attrs = __power_supply_attrs, .is_visible = power_supply_attr_is_visible, }; static const struct attribute_group *power_supply_attr_groups[] = { &power_supply_attr_group, NULL, }; void 
power_supply_init_attrs(struct device_type *dev_type) { int i; dev_type->groups = power_supply_attr_groups; for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++) __power_supply_attrs[i] = &power_supply_attrs[i].attr; } static char *kstruprdup(const char *str, gfp_t gfp) { char *ret, *ustr; ustr = ret = kmalloc(strlen(str) + 1, gfp); if (!ret) return NULL; while (*str) *ustr++ = toupper(*str++); *ustr = 0; return ret; } int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env) { struct power_supply *psy = dev_get_drvdata(dev); int ret = 0, j; char *prop_buf; char *attrname; dev_dbg(dev, "uevent\n"); if (!psy || !psy->dev) { dev_dbg(dev, "No power supply yet\n"); return ret; } dev_dbg(dev, "POWER_SUPPLY_NAME=%s\n", psy->name); ret = add_uevent_var(env, "POWER_SUPPLY_NAME=%s", psy->name); if (ret) return ret; prop_buf = (char *)get_zeroed_page(GFP_KERNEL); if (!prop_buf) return -ENOMEM; for (j = 0; j < psy->num_properties; j++) { struct device_attribute *attr; char *line; attr = &power_supply_attrs[psy->properties[j]]; ret = power_supply_show_property(dev, attr, prop_buf); if (ret == -ENODEV || ret == -ENODATA) { /* When a battery is absent, we expect -ENODEV. Don't abort; send the uevent with at least the the PRESENT=0 property */ ret = 0; continue; } if (ret < 0) goto out; line = strchr(prop_buf, '\n'); if (line) *line = 0; attrname = kstruprdup(attr->attr.name, GFP_KERNEL); if (!attrname) { ret = -ENOMEM; goto out; } dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf); ret = add_uevent_var(env, "POWER_SUPPLY_%s=%s", attrname, prop_buf); kfree(attrname); if (ret) goto out; } out: free_page((unsigned long)prop_buf); return ret; }
gpl-2.0
chenxuhua/linux
drivers/net/wireless/libertas/if_cs.c
2207
26212
/* Driver for the Marvell 8385 based compact flash WLAN cards. (C) 2007 by Holger Schurig <hs4233@mail.mn-solutions.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/netdevice.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <linux/io.h> #define DRV_NAME "libertas_cs" #include "decl.h" #include "defs.h" #include "dev.h" /********************************************************************/ /* Module stuff */ /********************************************************************/ MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>"); MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards"); MODULE_LICENSE("GPL"); /********************************************************************/ /* Data structures */ /********************************************************************/ struct if_cs_card { struct pcmcia_device *p_dev; struct lbs_private *priv; void __iomem *iobase; bool align_regs; u32 model; }; enum { MODEL_UNKNOWN = 0x00, MODEL_8305 = 0x01, MODEL_8381 = 0x02, MODEL_8385 = 0x03 }; static const struct lbs_fw_table fw_table[] = { { MODEL_8305, "libertas/cf8305.bin", NULL }, { MODEL_8305, 
"libertas_cs_helper.fw", NULL }, { MODEL_8381, "libertas/cf8381_helper.bin", "libertas/cf8381.bin" }, { MODEL_8381, "libertas_cs_helper.fw", "libertas_cs.fw" }, { MODEL_8385, "libertas/cf8385_helper.bin", "libertas/cf8385.bin" }, { MODEL_8385, "libertas_cs_helper.fw", "libertas_cs.fw" }, { 0, NULL, NULL } }; MODULE_FIRMWARE("libertas/cf8305.bin"); MODULE_FIRMWARE("libertas/cf8381_helper.bin"); MODULE_FIRMWARE("libertas/cf8381.bin"); MODULE_FIRMWARE("libertas/cf8385_helper.bin"); MODULE_FIRMWARE("libertas/cf8385.bin"); MODULE_FIRMWARE("libertas_cs_helper.fw"); MODULE_FIRMWARE("libertas_cs.fw"); /********************************************************************/ /* Hardware access */ /********************************************************************/ /* This define enables wrapper functions which allow you to dump all register accesses. You normally won't this, except for development */ /* #define DEBUG_IO */ #ifdef DEBUG_IO static int debug_output = 0; #else /* This way the compiler optimizes the printk's away */ #define debug_output 0 #endif static inline unsigned int if_cs_read8(struct if_cs_card *card, uint reg) { unsigned int val = ioread8(card->iobase + reg); if (debug_output) printk(KERN_INFO "inb %08x<%02x\n", reg, val); return val; } static inline unsigned int if_cs_read16(struct if_cs_card *card, uint reg) { unsigned int val = ioread16(card->iobase + reg); if (debug_output) printk(KERN_INFO "inw %08x<%04x\n", reg, val); return val; } static inline void if_cs_read16_rep( struct if_cs_card *card, uint reg, void *buf, unsigned long count) { if (debug_output) printk(KERN_INFO "insw %08x<(0x%lx words)\n", reg, count); ioread16_rep(card->iobase + reg, buf, count); } static inline void if_cs_write8(struct if_cs_card *card, uint reg, u8 val) { if (debug_output) printk(KERN_INFO "outb %08x>%02x\n", reg, val); iowrite8(val, card->iobase + reg); } static inline void if_cs_write16(struct if_cs_card *card, uint reg, u16 val) { if (debug_output) printk(KERN_INFO 
"outw %08x>%04x\n", reg, val); iowrite16(val, card->iobase + reg); } static inline void if_cs_write16_rep( struct if_cs_card *card, uint reg, const void *buf, unsigned long count) { if (debug_output) printk(KERN_INFO "outsw %08x>(0x%lx words)\n", reg, count); iowrite16_rep(card->iobase + reg, buf, count); } /* * I know that polling/delaying is frowned upon. However, this procedure * with polling is needed while downloading the firmware. At this stage, * the hardware does unfortunately not create any interrupts. * * Fortunately, this function is never used once the firmware is in * the card. :-) * * As a reference, see the "Firmware Specification v5.1", page 18 * and 19. I did not follow their suggested timing to the word, * but this works nice & fast anyway. */ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 reg) { int i; for (i = 0; i < 100000; i++) { u8 val = if_cs_read8(card, addr); if (val == reg) return 0; udelay(5); } return -ETIME; } /* * First the bitmasks for the host/card interrupt/status registers: */ #define IF_CS_BIT_TX 0x0001 #define IF_CS_BIT_RX 0x0002 #define IF_CS_BIT_COMMAND 0x0004 #define IF_CS_BIT_RESP 0x0008 #define IF_CS_BIT_EVENT 0x0010 #define IF_CS_BIT_MASK 0x001f /* * It's not really clear to me what the host status register is for. It * needs to be set almost in union with "host int cause". 
The following * bits from above are used: * * IF_CS_BIT_TX driver downloaded a data packet * IF_CS_BIT_RX driver got a data packet * IF_CS_BIT_COMMAND driver downloaded a command * IF_CS_BIT_RESP not used (has some meaning with powerdown) * IF_CS_BIT_EVENT driver read a host event */ #define IF_CS_HOST_STATUS 0x00000000 /* * With the host int cause register can the host (that is, Linux) cause * an interrupt in the firmware, to tell the firmware about those events: * * IF_CS_BIT_TX a data packet has been downloaded * IF_CS_BIT_RX a received data packet has retrieved * IF_CS_BIT_COMMAND a firmware block or a command has been downloaded * IF_CS_BIT_RESP not used (has some meaning with powerdown) * IF_CS_BIT_EVENT a host event (link lost etc) has been retrieved */ #define IF_CS_HOST_INT_CAUSE 0x00000002 /* * The host int mask register is used to enable/disable interrupt. However, * I have the suspicion that disabled interrupts are lost. */ #define IF_CS_HOST_INT_MASK 0x00000004 /* * Used to send or receive data packets: */ #define IF_CS_WRITE 0x00000016 #define IF_CS_WRITE_LEN 0x00000014 #define IF_CS_READ 0x00000010 #define IF_CS_READ_LEN 0x00000024 /* * Used to send commands (and to send firmware block) and to * receive command responses: */ #define IF_CS_CMD 0x0000001A #define IF_CS_CMD_LEN 0x00000018 #define IF_CS_RESP 0x00000012 #define IF_CS_RESP_LEN 0x00000030 /* * The card status registers shows what the card/firmware actually * accepts: * * IF_CS_BIT_TX you may send a data packet * IF_CS_BIT_RX you may retrieve a data packet * IF_CS_BIT_COMMAND you may send a command * IF_CS_BIT_RESP you may retrieve a command response * IF_CS_BIT_EVENT the card has a event for use (link lost, snr low etc) * * When reading this register several times, you will get back the same * results --- with one exception: the IF_CS_BIT_EVENT clear itself * automatically. * * Not that we don't rely on BIT_RX,_BIT_RESP or BIT_EVENT because * we handle this via the card int cause register. 
*/ #define IF_CS_CARD_STATUS 0x00000020 #define IF_CS_CARD_STATUS_MASK 0x7f00 /* * The card int cause register is used by the card/firmware to notify us * about the following events: * * IF_CS_BIT_TX a data packet has successfully been sentx * IF_CS_BIT_RX a data packet has been received and can be retrieved * IF_CS_BIT_COMMAND not used * IF_CS_BIT_RESP the firmware has a command response for us * IF_CS_BIT_EVENT the card has a event for use (link lost, snr low etc) */ #define IF_CS_CARD_INT_CAUSE 0x00000022 /* * This is used to for handshaking with the card's bootloader/helper image * to synchronize downloading of firmware blocks. */ #define IF_CS_SQ_READ_LOW 0x00000028 #define IF_CS_SQ_HELPER_OK 0x10 /* * The scratch register tells us ... * * IF_CS_SCRATCH_BOOT_OK the bootloader runs * IF_CS_SCRATCH_HELPER_OK the helper firmware already runs */ #define IF_CS_SCRATCH 0x0000003F #define IF_CS_SCRATCH_BOOT_OK 0x00 #define IF_CS_SCRATCH_HELPER_OK 0x5a /* * Used to detect ancient chips: */ #define IF_CS_PRODUCT_ID 0x0000001C #define IF_CS_CF8385_B1_REV 0x12 #define IF_CS_CF8381_B3_REV 0x04 #define IF_CS_CF8305_B1_REV 0x03 /* * Used to detect other cards than CF8385 since their revisions of silicon * doesn't match those from CF8385, eg. CF8381 B3 works with this driver. */ #define CF8305_MANFID 0x02db #define CF8305_CARDID 0x8103 #define CF8381_MANFID 0x02db #define CF8381_CARDID 0x6064 #define CF8385_MANFID 0x02df #define CF8385_CARDID 0x8103 /* * FIXME: just use the 'driver_info' field of 'struct pcmcia_device_id' when * that gets fixed. Currently there's no way to access it from the probe hook. 
*/ static inline u32 get_model(u16 manf_id, u16 card_id) { /* NOTE: keep in sync with if_cs_ids */ if (manf_id == CF8305_MANFID && card_id == CF8305_CARDID) return MODEL_8305; else if (manf_id == CF8381_MANFID && card_id == CF8381_CARDID) return MODEL_8381; else if (manf_id == CF8385_MANFID && card_id == CF8385_CARDID) return MODEL_8385; return MODEL_UNKNOWN; } /********************************************************************/ /* I/O and interrupt handling */ /********************************************************************/ static inline void if_cs_enable_ints(struct if_cs_card *card) { lbs_deb_enter(LBS_DEB_CS); if_cs_write16(card, IF_CS_HOST_INT_MASK, 0); } static inline void if_cs_disable_ints(struct if_cs_card *card) { lbs_deb_enter(LBS_DEB_CS); if_cs_write16(card, IF_CS_HOST_INT_MASK, IF_CS_BIT_MASK); } /* * Called from if_cs_host_to_card to send a command to the hardware */ static int if_cs_send_cmd(struct lbs_private *priv, u8 *buf, u16 nb) { struct if_cs_card *card = (struct if_cs_card *)priv->card; int ret = -1; int loops = 0; lbs_deb_enter(LBS_DEB_CS); if_cs_disable_ints(card); /* Is hardware ready? */ while (1) { u16 status = if_cs_read16(card, IF_CS_CARD_STATUS); if (status & IF_CS_BIT_COMMAND) break; if (++loops > 100) { netdev_err(priv->dev, "card not ready for commands\n"); goto done; } mdelay(1); } if_cs_write16(card, IF_CS_CMD_LEN, nb); if_cs_write16_rep(card, IF_CS_CMD, buf, nb / 2); /* Are we supposed to transfer an odd amount of bytes? 
*/ if (nb & 1) if_cs_write8(card, IF_CS_CMD, buf[nb-1]); /* "Assert the download over interrupt command in the Host * status register" */ if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND); /* "Assert the download over interrupt command in the Card * interrupt case register" */ if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND); ret = 0; done: if_cs_enable_ints(card); lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); return ret; } /* * Called from if_cs_host_to_card to send a data to the hardware */ static void if_cs_send_data(struct lbs_private *priv, u8 *buf, u16 nb) { struct if_cs_card *card = (struct if_cs_card *)priv->card; u16 status; lbs_deb_enter(LBS_DEB_CS); if_cs_disable_ints(card); status = if_cs_read16(card, IF_CS_CARD_STATUS); BUG_ON((status & IF_CS_BIT_TX) == 0); if_cs_write16(card, IF_CS_WRITE_LEN, nb); /* write even number of bytes, then odd byte if necessary */ if_cs_write16_rep(card, IF_CS_WRITE, buf, nb / 2); if (nb & 1) if_cs_write8(card, IF_CS_WRITE, buf[nb-1]); if_cs_write16(card, IF_CS_HOST_STATUS, IF_CS_BIT_TX); if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_TX); if_cs_enable_ints(card); lbs_deb_leave(LBS_DEB_CS); } /* * Get the command result out of the card. */ static int if_cs_receive_cmdres(struct lbs_private *priv, u8 *data, u32 *len) { unsigned long flags; int ret = -1; u16 status; lbs_deb_enter(LBS_DEB_CS); /* is hardware ready? 
*/ status = if_cs_read16(priv->card, IF_CS_CARD_STATUS); if ((status & IF_CS_BIT_RESP) == 0) { netdev_err(priv->dev, "no cmd response in card\n"); *len = 0; goto out; } *len = if_cs_read16(priv->card, IF_CS_RESP_LEN); if ((*len == 0) || (*len > LBS_CMD_BUFFER_SIZE)) { netdev_err(priv->dev, "card cmd buffer has invalid # of bytes (%d)\n", *len); goto out; } /* read even number of bytes, then odd byte if necessary */ if_cs_read16_rep(priv->card, IF_CS_RESP, data, *len/sizeof(u16)); if (*len & 1) data[*len-1] = if_cs_read8(priv->card, IF_CS_RESP); /* This is a workaround for a firmware that reports too much * bytes */ *len -= 8; ret = 0; /* Clear this flag again */ spin_lock_irqsave(&priv->driver_lock, flags); priv->dnld_sent = DNLD_RES_RECEIVED; spin_unlock_irqrestore(&priv->driver_lock, flags); out: lbs_deb_leave_args(LBS_DEB_CS, "ret %d, len %d", ret, *len); return ret; } static struct sk_buff *if_cs_receive_data(struct lbs_private *priv) { struct sk_buff *skb = NULL; u16 len; u8 *data; lbs_deb_enter(LBS_DEB_CS); len = if_cs_read16(priv->card, IF_CS_READ_LEN); if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { netdev_err(priv->dev, "card data buffer has invalid # of bytes (%d)\n", len); priv->dev->stats.rx_dropped++; goto dat_err; } skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + 2); if (!skb) goto out; skb_put(skb, len); skb_reserve(skb, 2);/* 16 byte align */ data = skb->data; /* read even number of bytes, then odd byte if necessary */ if_cs_read16_rep(priv->card, IF_CS_READ, data, len/sizeof(u16)); if (len & 1) data[len-1] = if_cs_read8(priv->card, IF_CS_READ); dat_err: if_cs_write16(priv->card, IF_CS_HOST_STATUS, IF_CS_BIT_RX); if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_RX); out: lbs_deb_leave_args(LBS_DEB_CS, "ret %p", skb); return skb; } static irqreturn_t if_cs_interrupt(int irq, void *data) { struct if_cs_card *card = data; struct lbs_private *priv = card->priv; u16 cause; lbs_deb_enter(LBS_DEB_CS); /* Ask card interrupt cause 
register if there is something for us */ cause = if_cs_read16(card, IF_CS_CARD_INT_CAUSE); lbs_deb_cs("cause 0x%04x\n", cause); if (cause == 0) { /* Not for us */ return IRQ_NONE; } if (cause == 0xffff) { /* Read in junk, the card has probably been removed */ card->priv->surpriseremoved = 1; return IRQ_HANDLED; } if (cause & IF_CS_BIT_RX) { struct sk_buff *skb; lbs_deb_cs("rx packet\n"); skb = if_cs_receive_data(priv); if (skb) lbs_process_rxed_packet(priv, skb); } if (cause & IF_CS_BIT_TX) { lbs_deb_cs("tx done\n"); lbs_host_to_card_done(priv); } if (cause & IF_CS_BIT_RESP) { unsigned long flags; u8 i; lbs_deb_cs("cmd resp\n"); spin_lock_irqsave(&priv->driver_lock, flags); i = (priv->resp_idx == 0) ? 1 : 0; spin_unlock_irqrestore(&priv->driver_lock, flags); BUG_ON(priv->resp_len[i]); if_cs_receive_cmdres(priv, priv->resp_buf[i], &priv->resp_len[i]); spin_lock_irqsave(&priv->driver_lock, flags); lbs_notify_command_response(priv, i); spin_unlock_irqrestore(&priv->driver_lock, flags); } if (cause & IF_CS_BIT_EVENT) { u16 status = if_cs_read16(priv->card, IF_CS_CARD_STATUS); if_cs_write16(priv->card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_EVENT); lbs_queue_event(priv, (status & IF_CS_CARD_STATUS_MASK) >> 8); } /* Clear interrupt cause */ if_cs_write16(card, IF_CS_CARD_INT_CAUSE, cause & IF_CS_BIT_MASK); lbs_deb_leave(LBS_DEB_CS); return IRQ_HANDLED; } /********************************************************************/ /* Firmware */ /********************************************************************/ /* * Tries to program the helper firmware. * * Return 0 on success */ static int if_cs_prog_helper(struct if_cs_card *card, const struct firmware *fw) { int ret = 0; int sent = 0; u8 scratch; lbs_deb_enter(LBS_DEB_CS); /* * This is the only place where an unaligned register access happens on * the CF8305 card, therefore for the sake of speed of the driver, we do * the alignment correction here. 
*/ if (card->align_regs) scratch = if_cs_read16(card, IF_CS_SCRATCH) >> 8; else scratch = if_cs_read8(card, IF_CS_SCRATCH); /* "If the value is 0x5a, the firmware is already * downloaded successfully" */ if (scratch == IF_CS_SCRATCH_HELPER_OK) goto done; /* "If the value is != 00, it is invalid value of register */ if (scratch != IF_CS_SCRATCH_BOOT_OK) { ret = -ENODEV; goto done; } lbs_deb_cs("helper size %td\n", fw->size); /* "Set the 5 bytes of the helper image to 0" */ /* Not needed, this contains an ARM branch instruction */ for (;;) { /* "the number of bytes to send is 256" */ int count = 256; int remain = fw->size - sent; if (remain < count) count = remain; /* * "write the number of bytes to be sent to the I/O Command * write length register" */ if_cs_write16(card, IF_CS_CMD_LEN, count); /* "write this to I/O Command port register as 16 bit writes */ if (count) if_cs_write16_rep(card, IF_CS_CMD, &fw->data[sent], count >> 1); /* * "Assert the download over interrupt command in the Host * status register" */ if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND); /* * "Assert the download over interrupt command in the Card * interrupt case register" */ if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND); /* * "The host polls the Card Status register ... 
for 50 ms before * declaring a failure" */ ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS, IF_CS_BIT_COMMAND); if (ret < 0) { pr_err("can't download helper at 0x%x, ret %d\n", sent, ret); goto done; } if (count == 0) break; sent += count; } done: lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); return ret; } static int if_cs_prog_real(struct if_cs_card *card, const struct firmware *fw) { int ret = 0; int retry = 0; int len = 0; int sent; lbs_deb_enter(LBS_DEB_CS); lbs_deb_cs("fw size %td\n", fw->size); ret = if_cs_poll_while_fw_download(card, IF_CS_SQ_READ_LOW, IF_CS_SQ_HELPER_OK); if (ret < 0) { pr_err("helper firmware doesn't answer\n"); goto done; } for (sent = 0; sent < fw->size; sent += len) { len = if_cs_read16(card, IF_CS_SQ_READ_LOW); if (len & 1) { retry++; pr_info("odd, need to retry this firmware block\n"); } else { retry = 0; } if (retry > 20) { pr_err("could not download firmware\n"); ret = -ENODEV; goto done; } if (retry) { sent -= len; } if_cs_write16(card, IF_CS_CMD_LEN, len); if_cs_write16_rep(card, IF_CS_CMD, &fw->data[sent], (len+1) >> 1); if_cs_write8(card, IF_CS_HOST_STATUS, IF_CS_BIT_COMMAND); if_cs_write16(card, IF_CS_HOST_INT_CAUSE, IF_CS_BIT_COMMAND); ret = if_cs_poll_while_fw_download(card, IF_CS_CARD_STATUS, IF_CS_BIT_COMMAND); if (ret < 0) { pr_err("can't download firmware at 0x%x\n", sent); goto done; } } ret = if_cs_poll_while_fw_download(card, IF_CS_SCRATCH, 0x5a); if (ret < 0) pr_err("firmware download failed\n"); done: lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); return ret; } static void if_cs_prog_firmware(struct lbs_private *priv, int ret, const struct firmware *helper, const struct firmware *mainfw) { struct if_cs_card *card = priv->card; if (ret) { pr_err("failed to find firmware (%d)\n", ret); return; } /* Load the firmware */ ret = if_cs_prog_helper(card, helper); if (ret == 0 && (card->model != MODEL_8305)) ret = if_cs_prog_real(card, mainfw); if (ret) return; /* Now actually get the IRQ */ ret = 
request_irq(card->p_dev->irq, if_cs_interrupt, IRQF_SHARED, DRV_NAME, card); if (ret) { pr_err("error in request_irq\n"); return; } /* * Clear any interrupt cause that happened while sending * firmware/initializing card */ if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK); if_cs_enable_ints(card); /* And finally bring the card up */ priv->fw_ready = 1; if (lbs_start_card(priv) != 0) { pr_err("could not activate card\n"); free_irq(card->p_dev->irq, card); } } /********************************************************************/ /* Callback functions for libertas.ko */ /********************************************************************/ /* Send commands or data packets to the card */ static int if_cs_host_to_card(struct lbs_private *priv, u8 type, u8 *buf, u16 nb) { int ret = -1; lbs_deb_enter_args(LBS_DEB_CS, "type %d, bytes %d", type, nb); switch (type) { case MVMS_DAT: priv->dnld_sent = DNLD_DATA_SENT; if_cs_send_data(priv, buf, nb); ret = 0; break; case MVMS_CMD: priv->dnld_sent = DNLD_CMD_SENT; ret = if_cs_send_cmd(priv, buf, nb); break; default: netdev_err(priv->dev, "%s: unsupported type %d\n", __func__, type); } lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); return ret; } static void if_cs_release(struct pcmcia_device *p_dev) { struct if_cs_card *card = p_dev->priv; lbs_deb_enter(LBS_DEB_CS); free_irq(p_dev->irq, card); pcmcia_disable_device(p_dev); if (card->iobase) ioport_unmap(card->iobase); lbs_deb_leave(LBS_DEB_CS); } static int if_cs_ioprobe(struct pcmcia_device *p_dev, void *priv_data) { p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; if (p_dev->resource[1]->end) { pr_err("wrong CIS (check number of IO windows)\n"); return -ENODEV; } /* This reserves IO space but doesn't actually enable it */ return pcmcia_request_io(p_dev); } static int if_cs_probe(struct pcmcia_device *p_dev) { int ret = -ENOMEM; unsigned int prod_id; struct lbs_private *priv; struct if_cs_card *card; 
lbs_deb_enter(LBS_DEB_CS); card = kzalloc(sizeof(struct if_cs_card), GFP_KERNEL); if (!card) goto out; card->p_dev = p_dev; p_dev->priv = card; p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; if (pcmcia_loop_config(p_dev, if_cs_ioprobe, NULL)) { pr_err("error in pcmcia_loop_config\n"); goto out1; } /* * Allocate an interrupt line. Note that this does not assign * a handler to the interrupt, unless the 'Handler' member of * the irq structure is initialized. */ if (!p_dev->irq) goto out1; /* Initialize io access */ card->iobase = ioport_map(p_dev->resource[0]->start, resource_size(p_dev->resource[0])); if (!card->iobase) { pr_err("error in ioport_map\n"); ret = -EIO; goto out1; } ret = pcmcia_enable_device(p_dev); if (ret) { pr_err("error in pcmcia_enable_device\n"); goto out2; } /* Finally, report what we've done */ lbs_deb_cs("irq %d, io %pR", p_dev->irq, p_dev->resource[0]); /* * Most of the libertas cards can do unaligned register access, but some * weird ones cannot. That's especially true for the CF8305 card. 
*/ card->align_regs = false; card->model = get_model(p_dev->manf_id, p_dev->card_id); if (card->model == MODEL_UNKNOWN) { pr_err("unsupported manf_id 0x%04x / card_id 0x%04x\n", p_dev->manf_id, p_dev->card_id); ret = -ENODEV; goto out2; } /* Check if we have a current silicon */ prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID); if (card->model == MODEL_8305) { card->align_regs = true; if (prod_id < IF_CS_CF8305_B1_REV) { pr_err("8305 rev B0 and older are not supported\n"); ret = -ENODEV; goto out2; } } if ((card->model == MODEL_8381) && prod_id < IF_CS_CF8381_B3_REV) { pr_err("8381 rev B2 and older are not supported\n"); ret = -ENODEV; goto out2; } if ((card->model == MODEL_8385) && prod_id < IF_CS_CF8385_B1_REV) { pr_err("8385 rev B0 and older are not supported\n"); ret = -ENODEV; goto out2; } /* Make this card known to the libertas driver */ priv = lbs_add_card(card, &p_dev->dev); if (!priv) { ret = -ENOMEM; goto out2; } /* Set up fields in lbs_private */ card->priv = priv; priv->card = card; priv->hw_host_to_card = if_cs_host_to_card; priv->enter_deep_sleep = NULL; priv->exit_deep_sleep = NULL; priv->reset_deep_sleep_wakeup = NULL; /* Get firmware */ ret = lbs_get_firmware_async(priv, &p_dev->dev, card->model, fw_table, if_cs_prog_firmware); if (ret) { pr_err("failed to find firmware (%d)\n", ret); goto out3; } goto out; out3: lbs_remove_card(priv); out2: ioport_unmap(card->iobase); out1: pcmcia_disable_device(p_dev); out: lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); return ret; } static void if_cs_detach(struct pcmcia_device *p_dev) { struct if_cs_card *card = p_dev->priv; lbs_deb_enter(LBS_DEB_CS); lbs_stop_card(card->priv); lbs_remove_card(card->priv); if_cs_disable_ints(card); if_cs_release(p_dev); kfree(card); lbs_deb_leave(LBS_DEB_CS); } /********************************************************************/ /* Module initialization */ /********************************************************************/ static const struct pcmcia_device_id if_cs_ids[] = { 
PCMCIA_DEVICE_MANF_CARD(CF8305_MANFID, CF8305_CARDID), PCMCIA_DEVICE_MANF_CARD(CF8381_MANFID, CF8381_CARDID), PCMCIA_DEVICE_MANF_CARD(CF8385_MANFID, CF8385_CARDID), /* NOTE: keep in sync with get_model() */ PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, if_cs_ids); static struct pcmcia_driver lbs_driver = { .owner = THIS_MODULE, .name = DRV_NAME, .probe = if_cs_probe, .remove = if_cs_detach, .id_table = if_cs_ids, }; module_pcmcia_driver(lbs_driver);
gpl-2.0
sztupy/samsung-kernel-herring
arch/mips/txx9/rbtx4939/setup.c
3743
14719
/* * Toshiba RBTX4939 setup routines. * Based on linux/arch/mips/txx9/rbtx4938/setup.c, * and RBTX49xx patch from CELF patch archive. * * Copyright (C) 2000-2001,2005-2007 Toshiba Corporation * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the * terms of the GNU General Public License version 2. This program is * licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/interrupt.h> #include <linux/smc91x.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/map.h> #include <asm/reboot.h> #include <asm/txx9/generic.h> #include <asm/txx9/pci.h> #include <asm/txx9/rbtx4939.h> static void rbtx4939_machine_restart(char *command) { local_irq_disable(); writeb(1, rbtx4939_reseten_addr); writeb(1, rbtx4939_softreset_addr); while (1) ; } static void __init rbtx4939_time_init(void) { tx4939_time_init(0); } #if defined(__BIG_ENDIAN) && \ (defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)) #define HAVE_RBTX4939_IOSWAB #define IS_CE1_ADDR(addr) \ ((((unsigned long)(addr) - IO_BASE) & 0xfff00000) == TXX9_CE(1)) static u16 rbtx4939_ioswabw(volatile u16 *a, u16 x) { return IS_CE1_ADDR(a) ? x : le16_to_cpu(x); } static u16 rbtx4939_mem_ioswabw(volatile u16 *a, u16 x) { return !IS_CE1_ADDR(a) ? 
x : le16_to_cpu(x); } #endif /* __BIG_ENDIAN && CONFIG_SMC91X */ static void __init rbtx4939_pci_setup(void) { #ifdef CONFIG_PCI int extarb = !(__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCIARB); struct pci_controller *c = &txx9_primary_pcic; register_pci_controller(c); tx4939_report_pciclk(); tx4927_pcic_setup(tx4939_pcicptr, c, extarb); if (!(__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_ATA1MODE) && (__raw_readq(&tx4939_ccfgptr->pcfg) & (TX4939_PCFG_ET0MODE | TX4939_PCFG_ET1MODE))) { tx4939_report_pci1clk(); /* mem:64K(max), io:64K(max) (enough for ETH0,ETH1) */ c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000); register_pci_controller(c); tx4927_pcic_setup(tx4939_pcic1ptr, c, 0); } tx4939_setup_pcierr_irq(); #endif /* CONFIG_PCI */ } static unsigned long long default_ebccr[] __initdata = { 0x01c0000000007608ULL, /* 64M ROM */ 0x017f000000007049ULL, /* 1M IOC */ 0x0180000000408608ULL, /* ISA */ 0, }; static void __init rbtx4939_ebusc_setup(void) { int i; unsigned int sp; /* use user-configured speed */ sp = TX4939_EBUSC_CR(0) & 0x30; default_ebccr[0] |= sp; default_ebccr[1] |= sp; default_ebccr[2] |= sp; /* initialise by myself */ for (i = 0; i < ARRAY_SIZE(default_ebccr); i++) { if (default_ebccr[i]) ____raw_writeq(default_ebccr[i], &tx4939_ebuscptr->cr[i]); else ____raw_writeq(____raw_readq(&tx4939_ebuscptr->cr[i]) & ~8, &tx4939_ebuscptr->cr[i]); } } static void __init rbtx4939_update_ioc_pen(void) { __u64 pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg); __u64 ccfg = ____raw_readq(&tx4939_ccfgptr->ccfg); __u8 pe1 = readb(rbtx4939_pe1_addr); __u8 pe2 = readb(rbtx4939_pe2_addr); __u8 pe3 = readb(rbtx4939_pe3_addr); if (pcfg & TX4939_PCFG_ATA0MODE) pe1 |= RBTX4939_PE1_ATA(0); else pe1 &= ~RBTX4939_PE1_ATA(0); if (pcfg & TX4939_PCFG_ATA1MODE) { pe1 |= RBTX4939_PE1_ATA(1); pe1 &= ~(RBTX4939_PE1_RMII(0) | RBTX4939_PE1_RMII(1)); } else { pe1 &= ~RBTX4939_PE1_ATA(1); if (pcfg & TX4939_PCFG_ET0MODE) pe1 |= RBTX4939_PE1_RMII(0); else pe1 &= 
~RBTX4939_PE1_RMII(0); if (pcfg & TX4939_PCFG_ET1MODE) pe1 |= RBTX4939_PE1_RMII(1); else pe1 &= ~RBTX4939_PE1_RMII(1); } if (ccfg & TX4939_CCFG_PTSEL) pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P | RBTX4939_PE3_VP_S); else { __u64 vmode = pcfg & (TX4939_PCFG_VSSMODE | TX4939_PCFG_VPSMODE); if (vmode == 0) pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P | RBTX4939_PE3_VP_S); else if (vmode == TX4939_PCFG_VPSMODE) { pe3 |= RBTX4939_PE3_VP_P; pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_S); } else if (vmode == TX4939_PCFG_VSSMODE) { pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_S; pe3 &= ~RBTX4939_PE3_VP_P; } else { pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_P; pe3 &= ~RBTX4939_PE3_VP_S; } } if (pcfg & TX4939_PCFG_SPIMODE) { if (pcfg & TX4939_PCFG_SIO2MODE_GPIO) pe2 &= ~(RBTX4939_PE2_SIO2 | RBTX4939_PE2_SIO0); else { if (pcfg & TX4939_PCFG_SIO2MODE_SIO2) { pe2 |= RBTX4939_PE2_SIO2; pe2 &= ~RBTX4939_PE2_SIO0; } else { pe2 |= RBTX4939_PE2_SIO0; pe2 &= ~RBTX4939_PE2_SIO2; } } if (pcfg & TX4939_PCFG_SIO3MODE) pe2 |= RBTX4939_PE2_SIO3; else pe2 &= ~RBTX4939_PE2_SIO3; pe2 &= ~RBTX4939_PE2_SPI; } else { pe2 |= RBTX4939_PE2_SPI; pe2 &= ~(RBTX4939_PE2_SIO3 | RBTX4939_PE2_SIO2 | RBTX4939_PE2_SIO0); } if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_GPIO) pe2 |= RBTX4939_PE2_GPIO; else pe2 &= ~RBTX4939_PE2_GPIO; writeb(pe1, rbtx4939_pe1_addr); writeb(pe2, rbtx4939_pe2_addr); writeb(pe3, rbtx4939_pe3_addr); } #define RBTX4939_MAX_7SEGLEDS 8 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) static u8 led_val[RBTX4939_MAX_7SEGLEDS]; struct rbtx4939_led_data { struct led_classdev cdev; char name[32]; unsigned int num; }; /* Use "dot" in 7seg LEDs */ static void rbtx4939_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { struct rbtx4939_led_data *led_dat = container_of(led_cdev, struct rbtx4939_led_data, cdev); unsigned int num = led_dat->num; unsigned long flags; local_irq_save(flags); led_val[num] = (led_val[num] & 0x7f) | (value ? 
0x80 : 0); writeb(led_val[num], rbtx4939_7seg_addr(num / 4, num % 4)); local_irq_restore(flags); } static int __init rbtx4939_led_probe(struct platform_device *pdev) { struct rbtx4939_led_data *leds_data; int i; static char *default_triggers[] __initdata = { "heartbeat", "ide-disk", "nand-disk", }; leds_data = kzalloc(sizeof(*leds_data) * RBTX4939_MAX_7SEGLEDS, GFP_KERNEL); if (!leds_data) return -ENOMEM; for (i = 0; i < RBTX4939_MAX_7SEGLEDS; i++) { int rc; struct rbtx4939_led_data *led_dat = &leds_data[i]; led_dat->num = i; led_dat->cdev.brightness_set = rbtx4939_led_brightness_set; sprintf(led_dat->name, "rbtx4939:amber:%u", i); led_dat->cdev.name = led_dat->name; if (i < ARRAY_SIZE(default_triggers)) led_dat->cdev.default_trigger = default_triggers[i]; rc = led_classdev_register(&pdev->dev, &led_dat->cdev); if (rc < 0) return rc; led_dat->cdev.brightness_set(&led_dat->cdev, 0); } return 0; } static struct platform_driver rbtx4939_led_driver = { .driver = { .name = "rbtx4939-led", .owner = THIS_MODULE, }, }; static void __init rbtx4939_led_setup(void) { platform_device_register_simple("rbtx4939-led", -1, NULL, 0); platform_driver_probe(&rbtx4939_led_driver, rbtx4939_led_probe); } #else static inline void rbtx4939_led_setup(void) { } #endif static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val) { #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) unsigned long flags; local_irq_save(flags); /* bit7: reserved for LED class */ led_val[pos] = (led_val[pos] & 0x80) | (val & 0x7f); val = led_val[pos]; local_irq_restore(flags); #endif writeb(val, rbtx4939_7seg_addr(pos / 4, pos % 4)); } static void rbtx4939_7segled_putc(unsigned int pos, unsigned char val) { /* convert from map_to_seg7() notation */ val = (val & 0x88) | ((val & 0x40) >> 6) | ((val & 0x20) >> 4) | ((val & 0x10) >> 2) | ((val & 0x04) << 2) | ((val & 0x02) << 4) | ((val & 0x01) << 6); __rbtx4939_7segled_putc(pos, val); } #if defined(CONFIG_MTD_RBTX4939) || 
defined(CONFIG_MTD_RBTX4939_MODULE) /* special mapping for boot rom */ static unsigned long rbtx4939_flash_fixup_ofs(unsigned long ofs) { u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f; unsigned char shift; if (bdipsw & 8) { /* BOOT Mode: USER ROM1 / USER ROM2 */ shift = bdipsw & 3; /* rotate A[23:22] */ return (ofs & ~0xc00000) | ((((ofs >> 22) + shift) & 3) << 22); } #ifdef __BIG_ENDIAN if (bdipsw == 0) /* BOOT Mode: Monitor ROM */ ofs ^= 0x400000; /* swap A[22] */ #endif return ofs; } static map_word rbtx4939_flash_read16(struct map_info *map, unsigned long ofs) { map_word r; ofs = rbtx4939_flash_fixup_ofs(ofs); r.x[0] = __raw_readw(map->virt + ofs); return r; } static void rbtx4939_flash_write16(struct map_info *map, const map_word datum, unsigned long ofs) { ofs = rbtx4939_flash_fixup_ofs(ofs); __raw_writew(datum.x[0], map->virt + ofs); mb(); /* see inline_map_write() in mtd/map.h */ } static void rbtx4939_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f; unsigned char shift; ssize_t curlen; from += (unsigned long)map->virt; if (bdipsw & 8) { /* BOOT Mode: USER ROM1 / USER ROM2 */ shift = bdipsw & 3; while (len) { curlen = min_t(unsigned long, len, 0x400000 - (from & (0x400000 - 1))); memcpy(to, (void *)((from & ~0xc00000) | ((((from >> 22) + shift) & 3) << 22)), curlen); len -= curlen; from += curlen; to += curlen; } return; } #ifdef __BIG_ENDIAN if (bdipsw == 0) { /* BOOT Mode: Monitor ROM */ while (len) { curlen = min_t(unsigned long, len, 0x400000 - (from & (0x400000 - 1))); memcpy(to, (void *)(from ^ 0x400000), curlen); len -= curlen; from += curlen; to += curlen; } return; } #endif memcpy(to, (void *)from, len); } static void rbtx4939_flash_map_init(struct map_info *map) { map->read = rbtx4939_flash_read16; map->write = rbtx4939_flash_write16; map->copy_from = rbtx4939_flash_copy_from; } static void __init rbtx4939_mtd_init(void) { static struct { struct platform_device dev; 
struct resource res; struct rbtx4939_flash_data data; } pdevs[4]; int i; static char names[4][8]; static struct mtd_partition parts[4]; struct rbtx4939_flash_data *boot_pdata = &pdevs[0].data; u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f; if (bdipsw & 8) { /* BOOT Mode: USER ROM1 / USER ROM2 */ boot_pdata->nr_parts = 4; for (i = 0; i < boot_pdata->nr_parts; i++) { sprintf(names[i], "img%d", 4 - i); parts[i].name = names[i]; parts[i].size = 0x400000; parts[i].offset = MTDPART_OFS_NXTBLK; } } else if (bdipsw == 0) { /* BOOT Mode: Monitor ROM */ boot_pdata->nr_parts = 2; strcpy(names[0], "big"); strcpy(names[1], "little"); for (i = 0; i < boot_pdata->nr_parts; i++) { parts[i].name = names[i]; parts[i].size = 0x400000; parts[i].offset = MTDPART_OFS_NXTBLK; } } else { /* BOOT Mode: ROM Emulator */ boot_pdata->nr_parts = 2; parts[0].name = "boot"; parts[0].offset = 0xc00000; parts[0].size = 0x400000; parts[1].name = "user"; parts[1].offset = 0; parts[1].size = 0xc00000; } boot_pdata->parts = parts; boot_pdata->map_init = rbtx4939_flash_map_init; for (i = 0; i < ARRAY_SIZE(pdevs); i++) { struct resource *r = &pdevs[i].res; struct platform_device *dev = &pdevs[i].dev; r->start = 0x1f000000 - i * 0x1000000; r->end = r->start + 0x1000000 - 1; r->flags = IORESOURCE_MEM; pdevs[i].data.width = 2; dev->num_resources = 1; dev->resource = r; dev->id = i; dev->name = "rbtx4939-flash"; dev->dev.platform_data = &pdevs[i].data; platform_device_register(dev); } } #else static void __init rbtx4939_mtd_init(void) { } #endif static void __init rbtx4939_arch_init(void) { rbtx4939_pci_setup(); } static void __init rbtx4939_device_init(void) { unsigned long smc_addr = RBTX4939_ETHER_ADDR - IO_BASE; struct resource smc_res[] = { { .start = smc_addr, .end = smc_addr + 0x10 - 1, .flags = IORESOURCE_MEM, }, { .start = RBTX4939_IRQ_ETHER, /* override default irq flag defined in smc91x.h */ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; struct smc91x_platdata smc_pdata = { .flags = 
SMC91X_USE_16BIT, }; struct platform_device *pdev; #if defined(CONFIG_TC35815) || defined(CONFIG_TC35815_MODULE) int i, j; unsigned char ethaddr[2][6]; u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f; for (i = 0; i < 2; i++) { unsigned long area = CKSEG1 + 0x1fff0000 + (i * 0x10); if (bdipsw == 0) memcpy(ethaddr[i], (void *)area, 6); else { u16 buf[3]; if (bdipsw & 8) area -= 0x03000000; else area -= 0x01000000; for (j = 0; j < 3; j++) buf[j] = le16_to_cpup((u16 *)(area + j * 2)); memcpy(ethaddr[i], buf, 6); } } tx4939_ethaddr_init(ethaddr[0], ethaddr[1]); #endif pdev = platform_device_alloc("smc91x", -1); if (!pdev || platform_device_add_resources(pdev, smc_res, ARRAY_SIZE(smc_res)) || platform_device_add_data(pdev, &smc_pdata, sizeof(smc_pdata)) || platform_device_add(pdev)) platform_device_put(pdev); rbtx4939_mtd_init(); /* TC58DVM82A1FT: tDH=10ns, tWP=tRP=tREADID=35ns */ tx4939_ndfmc_init(10, 35, (1 << 1) | (1 << 2), (1 << 2)); /* ch1:8bit, ch2:16bit */ rbtx4939_led_setup(); tx4939_wdt_init(); tx4939_ata_init(); tx4939_rtc_init(); tx4939_dmac_init(0, 2); tx4939_aclc_init(); platform_device_register_simple("txx9aclc-generic", -1, NULL, 0); tx4939_sramc_init(); tx4939_rng_init(); } static void __init rbtx4939_setup(void) { int i; rbtx4939_ebusc_setup(); /* always enable ATA0 */ txx9_set64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_ATA0MODE); if (txx9_master_clock == 0) txx9_master_clock = 20000000; tx4939_setup(); rbtx4939_update_ioc_pen(); #ifdef HAVE_RBTX4939_IOSWAB ioswabw = rbtx4939_ioswabw; __mem_ioswabw = rbtx4939_mem_ioswabw; #endif _machine_restart = rbtx4939_machine_restart; txx9_7segled_init(RBTX4939_MAX_7SEGLEDS, rbtx4939_7segled_putc); for (i = 0; i < RBTX4939_MAX_7SEGLEDS; i++) txx9_7segled_putc(i, '-'); pr_info("RBTX4939 (Rev %02x) --- FPGA(Rev %02x) DIPSW:%02x,%02x\n", readb(rbtx4939_board_rev_addr), readb(rbtx4939_ioc_rev_addr), readb(rbtx4939_udipsw_addr), readb(rbtx4939_bdipsw_addr)); #ifdef CONFIG_PCI txx9_alloc_pci_controller(&txx9_primary_pcic, 0, 0, 
0, 0); txx9_board_pcibios_setup = tx4927_pcibios_setup; #else set_io_port_base(RBTX4939_ETHER_BASE); #endif tx4939_sio_init(TX4939_SCLK0(txx9_master_clock), 0); } struct txx9_board_vec rbtx4939_vec __initdata = { .system = "Toshiba RBTX4939", .prom_init = rbtx4939_prom_init, .mem_setup = rbtx4939_setup, .irq_setup = rbtx4939_irq_setup, .time_init = rbtx4939_time_init, .device_init = rbtx4939_device_init, .arch_init = rbtx4939_arch_init, #ifdef CONFIG_PCI .pci_map_irq = tx4939_pci_map_irq, #endif };
gpl-2.0
SomethingExplosive/android_kernel_lge_mako
drivers/ide/cmd640.c
5023
22964
/* * Copyright (C) 1995-1996 Linus Torvalds & authors (see below) */ /* * Original authors: abramov@cecmow.enet.dec.com (Igor Abramov) * mlord@pobox.com (Mark Lord) * * See linux/MAINTAINERS for address of current maintainer. * * This file provides support for the advanced features and bugs * of IDE interfaces using the CMD Technologies 0640 IDE interface chip. * * These chips are basically fucked by design, and getting this driver * to work on every motherboard design that uses this screwed chip seems * bloody well impossible. However, we're still trying. * * Version 0.97 worked for everybody. * * User feedback is essential. Many thanks to the beta test team: * * A.Hartgers@stud.tue.nl, JZDQC@CUNYVM.CUNY.edu, abramov@cecmow.enet.dec.com, * bardj@utopia.ppp.sn.no, bart@gaga.tue.nl, bbol001@cs.auckland.ac.nz, * chrisc@dbass.demon.co.uk, dalecki@namu26.Num.Math.Uni-Goettingen.de, * derekn@vw.ece.cmu.edu, florian@btp2x3.phy.uni-bayreuth.de, * flynn@dei.unipd.it, gadio@netvision.net.il, godzilla@futuris.net, * j@pobox.com, jkemp1@mises.uni-paderborn.de, jtoppe@hiwaay.net, * kerouac@ssnet.com, meskes@informatik.rwth-aachen.de, hzoli@cs.elte.hu, * peter@udgaard.isgtec.com, phil@tazenda.demon.co.uk, roadcapw@cfw.com, * s0033las@sun10.vsz.bme.hu, schaffer@tam.cornell.edu, sjd@slip.net, * steve@ei.org, ulrpeg@bigcomm.gun.de, ism@tardis.ed.ac.uk, mack@cray.com * liug@mama.indstate.edu, and others. * * Version 0.01 Initial version, hacked out of ide.c, * and #include'd rather than compiled separately. * This will get cleaned up in a subsequent release. * * Version 0.02 Fixes for vlb initialization code, enable prefetch * for versions 'B' and 'C' of chip by default, * some code cleanup. * * Version 0.03 Added reset of secondary interface, * and black list for devices which are not compatible * with prefetch mode. Separate function for setting * prefetch is added, possibly it will be called some * day from ioctl processing code. 
* * Version 0.04 Now configs/compiles separate from ide.c * * Version 0.05 Major rewrite of interface timing code. * Added new function cmd640_set_mode to set PIO mode * from ioctl call. New drives added to black list. * * Version 0.06 More code cleanup. Prefetch is enabled only for * detected hard drives, not included in prefetch * black list. * * Version 0.07 Changed to more conservative drive tuning policy. * Unknown drives, which report PIO < 4 are set to * (reported_PIO - 1) if it is supported, or to PIO0. * List of known drives extended by info provided by * CMD at their ftp site. * * Version 0.08 Added autotune/noautotune support. * * Version 0.09 Try to be smarter about 2nd port enabling. * Version 0.10 Be nice and don't reset 2nd port. * Version 0.11 Try to handle more weird situations. * * Version 0.12 Lots of bug fixes from Laszlo Peter * irq unmasking disabled for reliability. * try to be even smarter about the second port. * tidy up source code formatting. * Version 0.13 permit irq unmasking again. * Version 0.90 massive code cleanup, some bugs fixed. * defaults all drives to PIO mode0, prefetch off. * autotune is OFF by default, with compile time flag. * prefetch can be turned OFF/ON using "hdparm -p8/-p9" * (requires hdparm-3.1 or newer) * Version 0.91 first release to linux-kernel list. * Version 0.92 move initial reg dump to separate callable function * change "readahead" to "prefetch" to avoid confusion * Version 0.95 respect original BIOS timings unless autotuning. * tons of code cleanup and rearrangement. * added CONFIG_BLK_DEV_CMD640_ENHANCED option * prevent use of unmask when prefetch is on * Version 0.96 prevent use of io_32bit when prefetch is off * Version 0.97 fix VLB secondary interface for sjd@slip.net * other minor tune-ups: 0.96 was very good. * Version 0.98 ignore PCI version when disabled by BIOS * Version 0.99 display setup/active/recovery clocks with PIO mode * Version 1.00 Mmm.. 
cannot depend on PCMD_ENA in all systems * Version 1.01 slow/fast devsel can be selected with "hdparm -p6/-p7" * ("fast" is necessary for 32bit I/O in some systems) * Version 1.02 fix bug that resulted in slow "setup times" * (patch courtesy of Zoltan Hidvegi) */ #define CMD640_PREFETCH_MASKS 1 /*#define CMD640_DUMP_REGS */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/ide.h> #include <linux/init.h> #include <linux/module.h> #include <asm/io.h> #define DRV_NAME "cmd640" static bool cmd640_vlb; /* * CMD640 specific registers definition. */ #define VID 0x00 #define DID 0x02 #define PCMD 0x04 #define PCMD_ENA 0x01 #define PSTTS 0x06 #define REVID 0x08 #define PROGIF 0x09 #define SUBCL 0x0a #define BASCL 0x0b #define BaseA0 0x10 #define BaseA1 0x14 #define BaseA2 0x18 #define BaseA3 0x1c #define INTLINE 0x3c #define INPINE 0x3d #define CFR 0x50 #define CFR_DEVREV 0x03 #define CFR_IDE01INTR 0x04 #define CFR_DEVID 0x18 #define CFR_AT_VESA_078h 0x20 #define CFR_DSA1 0x40 #define CFR_DSA0 0x80 #define CNTRL 0x51 #define CNTRL_DIS_RA0 0x40 #define CNTRL_DIS_RA1 0x80 #define CNTRL_ENA_2ND 0x08 #define CMDTIM 0x52 #define ARTTIM0 0x53 #define DRWTIM0 0x54 #define ARTTIM1 0x55 #define DRWTIM1 0x56 #define ARTTIM23 0x57 #define ARTTIM23_DIS_RA2 0x04 #define ARTTIM23_DIS_RA3 0x08 #define ARTTIM23_IDE23INTR 0x10 #define DRWTIM23 0x58 #define BRST 0x59 /* * Registers and masks for easy access by drive index: */ static u8 prefetch_regs[4] = {CNTRL, CNTRL, ARTTIM23, ARTTIM23}; static u8 prefetch_masks[4] = {CNTRL_DIS_RA0, CNTRL_DIS_RA1, ARTTIM23_DIS_RA2, ARTTIM23_DIS_RA3}; #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED static u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23}; static u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM23, DRWTIM23}; /* * Current cmd640 timing values for each drive. * The defaults for each are the slowest possible timings. 
*/ static u8 setup_counts[4] = {4, 4, 4, 4}; /* Address setup count (in clocks) */ static u8 active_counts[4] = {16, 16, 16, 16}; /* Active count (encoded) */ static u8 recovery_counts[4] = {16, 16, 16, 16}; /* Recovery count (encoded) */ #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ static DEFINE_SPINLOCK(cmd640_lock); /* * Interface to access cmd640x registers */ static unsigned int cmd640_key; static void (*__put_cmd640_reg)(u16 reg, u8 val); static u8 (*__get_cmd640_reg)(u16 reg); /* * This is read from the CFR reg, and is used in several places. */ static unsigned int cmd640_chip_version; /* * The CMD640x chip does not support DWORD config write cycles, but some * of the BIOSes use them to implement the config services. * Therefore, we must use direct IO instead. */ /* PCI method 1 access */ static void put_cmd640_reg_pci1(u16 reg, u8 val) { outl_p((reg & 0xfc) | cmd640_key, 0xcf8); outb_p(val, (reg & 3) | 0xcfc); } static u8 get_cmd640_reg_pci1(u16 reg) { outl_p((reg & 0xfc) | cmd640_key, 0xcf8); return inb_p((reg & 3) | 0xcfc); } /* PCI method 2 access (from CMD datasheet) */ static void put_cmd640_reg_pci2(u16 reg, u8 val) { outb_p(0x10, 0xcf8); outb_p(val, cmd640_key + reg); outb_p(0, 0xcf8); } static u8 get_cmd640_reg_pci2(u16 reg) { u8 b; outb_p(0x10, 0xcf8); b = inb_p(cmd640_key + reg); outb_p(0, 0xcf8); return b; } /* VLB access */ static void put_cmd640_reg_vlb(u16 reg, u8 val) { outb_p(reg, cmd640_key); outb_p(val, cmd640_key + 4); } static u8 get_cmd640_reg_vlb(u16 reg) { outb_p(reg, cmd640_key); return inb_p(cmd640_key + 4); } static u8 get_cmd640_reg(u16 reg) { unsigned long flags; u8 b; spin_lock_irqsave(&cmd640_lock, flags); b = __get_cmd640_reg(reg); spin_unlock_irqrestore(&cmd640_lock, flags); return b; } static void put_cmd640_reg(u16 reg, u8 val) { unsigned long flags; spin_lock_irqsave(&cmd640_lock, flags); __put_cmd640_reg(reg, val); spin_unlock_irqrestore(&cmd640_lock, flags); } static int __init match_pci_cmd640_device(void) { const u8 
ven_dev[4] = {0x95, 0x10, 0x40, 0x06}; unsigned int i; for (i = 0; i < 4; i++) { if (get_cmd640_reg(i) != ven_dev[i]) return 0; } #ifdef STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT if ((get_cmd640_reg(PCMD) & PCMD_ENA) == 0) { printk("ide: cmd640 on PCI disabled by BIOS\n"); return 0; } #endif /* STUPIDLY_TRUST_BROKEN_PCMD_ENA_BIT */ return 1; /* success */ } /* * Probe for CMD640x -- pci method 1 */ static int __init probe_for_cmd640_pci1(void) { __get_cmd640_reg = get_cmd640_reg_pci1; __put_cmd640_reg = put_cmd640_reg_pci1; for (cmd640_key = 0x80000000; cmd640_key <= 0x8000f800; cmd640_key += 0x800) { if (match_pci_cmd640_device()) return 1; /* success */ } return 0; } /* * Probe for CMD640x -- pci method 2 */ static int __init probe_for_cmd640_pci2(void) { __get_cmd640_reg = get_cmd640_reg_pci2; __put_cmd640_reg = put_cmd640_reg_pci2; for (cmd640_key = 0xc000; cmd640_key <= 0xcf00; cmd640_key += 0x100) { if (match_pci_cmd640_device()) return 1; /* success */ } return 0; } /* * Probe for CMD640x -- vlb */ static int __init probe_for_cmd640_vlb(void) { u8 b; __get_cmd640_reg = get_cmd640_reg_vlb; __put_cmd640_reg = put_cmd640_reg_vlb; cmd640_key = 0x178; b = get_cmd640_reg(CFR); if (b == 0xff || b == 0x00 || (b & CFR_AT_VESA_078h)) { cmd640_key = 0x78; b = get_cmd640_reg(CFR); if (b == 0xff || b == 0x00 || !(b & CFR_AT_VESA_078h)) return 0; } return 1; /* success */ } /* * Returns 1 if an IDE interface/drive exists at 0x170, * Returns 0 otherwise. */ static int __init secondary_port_responding(void) { unsigned long flags; spin_lock_irqsave(&cmd640_lock, flags); outb_p(0x0a, 0x176); /* select drive0 */ udelay(100); if ((inb_p(0x176) & 0x1f) != 0x0a) { outb_p(0x1a, 0x176); /* select drive1 */ udelay(100); if ((inb_p(0x176) & 0x1f) != 0x1a) { spin_unlock_irqrestore(&cmd640_lock, flags); return 0; /* nothing responded */ } } spin_unlock_irqrestore(&cmd640_lock, flags); return 1; /* success */ } #ifdef CMD640_DUMP_REGS /* * Dump out all cmd640 registers. 
May be called from ide.c */ static void cmd640_dump_regs(void) { unsigned int reg = cmd640_vlb ? 0x50 : 0x00; /* Dump current state of chip registers */ printk("ide: cmd640 internal register dump:"); for (; reg <= 0x59; reg++) { if (!(reg & 0x0f)) printk("\n%04x:", reg); printk(" %02x", get_cmd640_reg(reg)); } printk("\n"); } #endif static void __set_prefetch_mode(ide_drive_t *drive, int mode) { if (mode) { /* want prefetch on? */ #if CMD640_PREFETCH_MASKS drive->dev_flags |= IDE_DFLAG_NO_UNMASK; drive->dev_flags &= ~IDE_DFLAG_UNMASK; #endif drive->dev_flags &= ~IDE_DFLAG_NO_IO_32BIT; } else { drive->dev_flags &= ~IDE_DFLAG_NO_UNMASK; drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT; drive->io_32bit = 0; } } #ifndef CONFIG_BLK_DEV_CMD640_ENHANCED /* * Check whether prefetch is on for a drive, * and initialize the unmask flags for safe operation. */ static void __init check_prefetch(ide_drive_t *drive, unsigned int index) { u8 b = get_cmd640_reg(prefetch_regs[index]); __set_prefetch_mode(drive, (b & prefetch_masks[index]) ? 0 : 1); } #else /* * Sets prefetch mode for a drive. 
*/ static void set_prefetch_mode(ide_drive_t *drive, unsigned int index, int mode) { unsigned long flags; int reg = prefetch_regs[index]; u8 b; spin_lock_irqsave(&cmd640_lock, flags); b = __get_cmd640_reg(reg); __set_prefetch_mode(drive, mode); if (mode) b &= ~prefetch_masks[index]; /* enable prefetch */ else b |= prefetch_masks[index]; /* disable prefetch */ __put_cmd640_reg(reg, b); spin_unlock_irqrestore(&cmd640_lock, flags); } /* * Dump out current drive clocks settings */ static void display_clocks(unsigned int index) { u8 active_count, recovery_count; active_count = active_counts[index]; if (active_count == 1) ++active_count; recovery_count = recovery_counts[index]; if (active_count > 3 && recovery_count == 1) ++recovery_count; if (cmd640_chip_version > 1) recovery_count += 1; /* cmd640b uses (count + 1)*/ printk(", clocks=%d/%d/%d\n", setup_counts[index], active_count, recovery_count); } /* * Pack active and recovery counts into single byte representation * used by controller */ static inline u8 pack_nibbles(u8 upper, u8 lower) { return ((upper & 0x0f) << 4) | (lower & 0x0f); } /* * This routine writes the prepared setup/active/recovery counts * for a drive into the cmd640 chipset registers to active them. */ static void program_drive_counts(ide_drive_t *drive, unsigned int index) { unsigned long flags; u8 setup_count = setup_counts[index]; u8 active_count = active_counts[index]; u8 recovery_count = recovery_counts[index]; /* * Set up address setup count and drive read/write timing registers. * Primary interface has individual count/timing registers for * each drive. Secondary interface has one common set of registers, * so we merge the timings, using the slowest value for each timing. 
*/ if (index > 1) { ide_drive_t *peer = ide_get_pair_dev(drive); unsigned int mate = index ^ 1; if (peer) { if (setup_count < setup_counts[mate]) setup_count = setup_counts[mate]; if (active_count < active_counts[mate]) active_count = active_counts[mate]; if (recovery_count < recovery_counts[mate]) recovery_count = recovery_counts[mate]; } } /* * Convert setup_count to internal chipset representation */ switch (setup_count) { case 4: setup_count = 0x00; break; case 3: setup_count = 0x80; break; case 1: case 2: setup_count = 0x40; break; default: setup_count = 0xc0; /* case 5 */ } /* * Now that everything is ready, program the new timings */ spin_lock_irqsave(&cmd640_lock, flags); /* * Program the address_setup clocks into ARTTIM reg, * and then the active/recovery counts into the DRWTIM reg * (this converts counts of 16 into counts of zero -- okay). */ setup_count |= __get_cmd640_reg(arttim_regs[index]) & 0x3f; __put_cmd640_reg(arttim_regs[index], setup_count); __put_cmd640_reg(drwtim_regs[index], pack_nibbles(active_count, recovery_count)); spin_unlock_irqrestore(&cmd640_lock, flags); } /* * Set a specific pio_mode for a drive */ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index, u8 pio_mode, unsigned int cycle_time) { struct ide_timing *t; int setup_time, active_time, recovery_time, clock_time; u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count; int bus_speed; if (cmd640_vlb) bus_speed = ide_vlb_clk ? ide_vlb_clk : 50; else bus_speed = ide_pci_clk ? 
ide_pci_clk : 33; if (pio_mode > 5) pio_mode = 5; t = ide_timing_find_mode(XFER_PIO_0 + pio_mode); setup_time = t->setup; active_time = t->active; recovery_time = cycle_time - (setup_time + active_time); clock_time = 1000 / bus_speed; cycle_count = DIV_ROUND_UP(cycle_time, clock_time); setup_count = DIV_ROUND_UP(setup_time, clock_time); active_count = DIV_ROUND_UP(active_time, clock_time); if (active_count < 2) active_count = 2; /* minimum allowed by cmd640 */ recovery_count = DIV_ROUND_UP(recovery_time, clock_time); recovery_count2 = cycle_count - (setup_count + active_count); if (recovery_count2 > recovery_count) recovery_count = recovery_count2; if (recovery_count < 2) recovery_count = 2; /* minimum allowed by cmd640 */ if (recovery_count > 17) { active_count += recovery_count - 17; recovery_count = 17; } if (active_count > 16) active_count = 16; /* maximum allowed by cmd640 */ if (cmd640_chip_version > 1) recovery_count -= 1; /* cmd640b uses (count + 1)*/ if (recovery_count > 16) recovery_count = 16; /* maximum allowed by cmd640 */ setup_counts[index] = setup_count; active_counts[index] = active_count; recovery_counts[index] = recovery_count; /* * In a perfect world, we might set the drive pio mode here * (using WIN_SETFEATURE) before continuing. * * But we do not, because: * 1) this is the wrong place to do it (proper is do_special() in ide.c) * 2) in practice this is rarely, if ever, necessary */ program_drive_counts(drive, index); } static void cmd640_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { unsigned int index = 0, cycle_time; const u8 pio = drive->pio_mode - XFER_PIO_0; u8 b; switch (pio) { case 6: /* set fast-devsel off */ case 7: /* set fast-devsel on */ b = get_cmd640_reg(CNTRL) & ~0x27; if (pio & 1) b |= 0x27; put_cmd640_reg(CNTRL, b); printk("%s: %sabled cmd640 fast host timing (devsel)\n", drive->name, (pio & 1) ? 
"en" : "dis"); return; case 8: /* set prefetch off */ case 9: /* set prefetch on */ set_prefetch_mode(drive, index, pio & 1); printk("%s: %sabled cmd640 prefetch\n", drive->name, (pio & 1) ? "en" : "dis"); return; } cycle_time = ide_pio_cycle_time(drive, pio); cmd640_set_mode(drive, index, pio, cycle_time); printk("%s: selected cmd640 PIO mode%d (%dns)", drive->name, pio, cycle_time); display_clocks(index); } #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ static void __init cmd640_init_dev(ide_drive_t *drive) { unsigned int i = drive->hwif->channel * 2 + (drive->dn & 1); #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED /* * Reset timing to the slowest speed and turn off prefetch. * This way, the drive identify code has a better chance. */ setup_counts[i] = 4; /* max possible */ active_counts[i] = 16; /* max possible */ recovery_counts[i] = 16; /* max possible */ program_drive_counts(drive, i); set_prefetch_mode(drive, i, 0); printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch cleared\n", i); #else /* * Set the drive unmask flags to match the prefetch setting. */ check_prefetch(drive, i); printk(KERN_INFO DRV_NAME ": drive%d timings/prefetch(%s) preserved\n", i, (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT) ? "off" : "on"); #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ } static int cmd640_test_irq(ide_hwif_t *hwif) { int irq_reg = hwif->channel ? ARTTIM23 : CFR; u8 irq_mask = hwif->channel ? ARTTIM23_IDE23INTR : CFR_IDE01INTR; u8 irq_stat = get_cmd640_reg(irq_reg); return (irq_stat & irq_mask) ? 
1 : 0; } static const struct ide_port_ops cmd640_port_ops = { .init_dev = cmd640_init_dev, #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED .set_pio_mode = cmd640_set_pio_mode, #endif .test_irq = cmd640_test_irq, }; static int pci_conf1(void) { unsigned long flags; u32 tmp; spin_lock_irqsave(&cmd640_lock, flags); outb(0x01, 0xCFB); tmp = inl(0xCF8); outl(0x80000000, 0xCF8); if (inl(0xCF8) == 0x80000000) { outl(tmp, 0xCF8); spin_unlock_irqrestore(&cmd640_lock, flags); return 1; } outl(tmp, 0xCF8); spin_unlock_irqrestore(&cmd640_lock, flags); return 0; } static int pci_conf2(void) { unsigned long flags; spin_lock_irqsave(&cmd640_lock, flags); outb(0x00, 0xCFB); outb(0x00, 0xCF8); outb(0x00, 0xCFA); if (inb(0xCF8) == 0x00 && inb(0xCF8) == 0x00) { spin_unlock_irqrestore(&cmd640_lock, flags); return 1; } spin_unlock_irqrestore(&cmd640_lock, flags); return 0; } static const struct ide_port_info cmd640_port_info __initdata = { .chipset = ide_cmd640, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA | IDE_HFLAG_ABUSE_PREFETCH | IDE_HFLAG_ABUSE_FAST_DEVSEL, .port_ops = &cmd640_port_ops, .pio_mask = ATA_PIO5, }; static int cmd640x_init_one(unsigned long base, unsigned long ctl) { if (!request_region(base, 8, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", DRV_NAME, base, base + 7); return -EBUSY; } if (!request_region(ctl, 1, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n", DRV_NAME, ctl); release_region(base, 8); return -EBUSY; } return 0; } /* * Probe for a cmd640 chipset, and initialize it if found. */ static int __init cmd640x_init(void) { int second_port_cmd640 = 0, rc; const char *bus_type, *port2; u8 b, cfr; struct ide_hw hw[2], *hws[2]; if (cmd640_vlb && probe_for_cmd640_vlb()) { bus_type = "VLB"; } else { cmd640_vlb = 0; /* Find out what kind of PCI probing is supported otherwise Justin Gibbs will sulk.. 
*/ if (pci_conf1() && probe_for_cmd640_pci1()) bus_type = "PCI (type1)"; else if (pci_conf2() && probe_for_cmd640_pci2()) bus_type = "PCI (type2)"; else return 0; } /* * Undocumented magic (there is no 0x5b reg in specs) */ put_cmd640_reg(0x5b, 0xbd); if (get_cmd640_reg(0x5b) != 0xbd) { printk(KERN_ERR "ide: cmd640 init failed: wrong value in reg 0x5b\n"); return 0; } put_cmd640_reg(0x5b, 0); #ifdef CMD640_DUMP_REGS cmd640_dump_regs(); #endif /* * Documented magic begins here */ cfr = get_cmd640_reg(CFR); cmd640_chip_version = cfr & CFR_DEVREV; if (cmd640_chip_version == 0) { printk("ide: bad cmd640 revision: %d\n", cmd640_chip_version); return 0; } rc = cmd640x_init_one(0x1f0, 0x3f6); if (rc) return rc; rc = cmd640x_init_one(0x170, 0x376); if (rc) { release_region(0x3f6, 1); release_region(0x1f0, 8); return rc; } memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw[0], 0x1f0, 0x3f6); hw[0].irq = 14; ide_std_init_ports(&hw[1], 0x170, 0x376); hw[1].irq = 15; printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); /* * Initialize data for primary port */ hws[0] = &hw[0]; /* * Ensure compatibility by always using the slowest timings * for access to the drive's command register block, * and reset the prefetch burstsize to default (512 bytes). * * Maybe we need a way to NOT do these on *some* systems? 
*/ put_cmd640_reg(CMDTIM, 0); put_cmd640_reg(BRST, 0x40); b = get_cmd640_reg(CNTRL); /* * Try to enable the secondary interface, if not already enabled */ if (secondary_port_responding()) { if ((b & CNTRL_ENA_2ND)) { second_port_cmd640 = 1; port2 = "okay"; } else if (cmd640_vlb) { second_port_cmd640 = 1; port2 = "alive"; } else port2 = "not cmd640"; } else { put_cmd640_reg(CNTRL, b ^ CNTRL_ENA_2ND); /* toggle the bit */ if (secondary_port_responding()) { second_port_cmd640 = 1; port2 = "enabled"; } else { put_cmd640_reg(CNTRL, b); /* restore original setting */ port2 = "not responding"; } } /* * Initialize data for secondary cmd640 port, if enabled */ if (second_port_cmd640) hws[1] = &hw[1]; printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n", second_port_cmd640 ? "" : "not ", port2); #ifdef CMD640_DUMP_REGS cmd640_dump_regs(); #endif return ide_host_add(&cmd640_port_info, hws, second_port_cmd640 ? 2 : 1, NULL); } module_param_named(probe_vlb, cmd640_vlb, bool, 0); MODULE_PARM_DESC(probe_vlb, "probe for VLB version of CMD640 chipset"); module_init(cmd640x_init); MODULE_LICENSE("GPL");
gpl-2.0
greg17477/kernel_motley_mako
drivers/gpu/drm/radeon/evergreen_blit_shaders.c
7327
7940
/* * Copyright 2010 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Alex Deucher <alexander.deucher@amd.com> */ #include <linux/bug.h> #include <linux/types.h> #include <linux/kernel.h> /* * evergreen cards need to use the 3D engine to blit data which requires * quite a bit of hw state setup. Rather than pull the whole 3D driver * (which normally generates the 3D state) into the DRM, we opt to use * statically generated state tables. The regsiter state and shaders * were hand generated to support blitting functionality. See the 3D * driver or documentation for descriptions of the registers and * shader instructions. 
*/ const u32 evergreen_default_state[] = { 0xc0016900, 0x0000023b, 0x00000000, /* SQ_LDS_ALLOC_PS */ 0xc0066900, 0x00000240, 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xc0046900, 0x00000247, 0x00000000, /* SQ_GS_VERT_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, 0xc0026900, 0x00000010, 0x00000000, /* DB_Z_INFO */ 0x00000000, /* DB_STENCIL_INFO */ 0xc0016900, 0x00000200, 0x00000000, /* DB_DEPTH_CONTROL */ 0xc0066900, 0x00000000, 0x00000060, /* DB_RENDER_CONTROL */ 0x00000000, /* DB_COUNT_CONTROL */ 0x00000000, /* DB_DEPTH_VIEW */ 0x0000002a, /* DB_RENDER_OVERRIDE */ 0x00000000, /* DB_RENDER_OVERRIDE2 */ 0x00000000, /* DB_HTILE_DATA_BASE */ 0xc0026900, 0x0000000a, 0x00000000, /* DB_STENCIL_CLEAR */ 0x00000000, /* DB_DEPTH_CLEAR */ 0xc0016900, 0x000002dc, 0x0000aa00, /* DB_ALPHA_TO_MASK */ 0xc0016900, 0x00000080, 0x00000000, /* PA_SC_WINDOW_OFFSET */ 0xc00d6900, 0x00000083, 0x0000ffff, /* PA_SC_CLIPRECT_RULE */ 0x00000000, /* PA_SC_CLIPRECT_0_TL */ 0x20002000, /* PA_SC_CLIPRECT_0_BR */ 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0x00000000, 0x20002000, 0xaaaaaaaa, /* PA_SC_EDGERULE */ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */ 0x0000000f, /* CB_TARGET_MASK */ 0x0000000f, /* CB_SHADER_MASK */ 0xc0226900, 0x00000094, 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x80000000, 0x20002000, 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */ 0xc0016900, 0x000000d4, 0x00000000, /* SX_MISC */ 0xc0026900, 0x00000292, 0x00000000, /* PA_SC_MODE_CNTL_0 */ 0x00000000, /* PA_SC_MODE_CNTL_1 */ 0xc0106900, 0x00000300, 
0x00000000, /* PA_SC_LINE_CNTL */ 0x00000000, /* PA_SC_AA_CONFIG */ 0x00000005, /* PA_SU_VTX_CNTL */ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */ 0xffffffff, /* PA_SC_AA_MASK */ 0xc00d6900, 0x00000202, 0x00cc0010, /* CB_COLOR_CONTROL */ 0x00000210, /* DB_SHADER_CONTROL */ 0x00010000, /* PA_CL_CLIP_CNTL */ 0x00000004, /* PA_SU_SC_MODE_CNTL */ 0x00000100, /* PA_CL_VTE_CNTL */ 0x00000000, /* PA_CL_VS_OUT_CNTL */ 0x00000000, /* PA_CL_NANINF_CNTL */ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */ 0xc0066900, 0x000002de, 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0xc0016900, 0x00000229, 0x00000000, /* SQ_PGM_START_FS */ 0xc0016900, 0x0000022a, 0x00000000, /* SQ_PGM_RESOURCES_FS */ 0xc0096900, 0x00000100, 0x00ffffff, /* VGT_MAX_VTX_INDX */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* SX_ALPHA_TEST_CONTROL */ 0x00000000, /* CB_BLEND_RED */ 0x00000000, /* CB_BLEND_GREEN */ 0x00000000, /* CB_BLEND_BLUE */ 0x00000000, /* CB_BLEND_ALPHA */ 0xc0026900, 0x000002a8, 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ 0x00000000, /* */ 0xc0026900, 0x000002ad, 0x00000000, /* VGT_REUSE_OFF */ 0x00000000, /* */ 0xc0116900, 0x00000280, 0x00000000, /* PA_SU_POINT_SIZE */ 0x00000000, /* PA_SU_POINT_MINMAX */ 0x00000008, /* PA_SU_LINE_CNTL */ 0x00000000, /* PA_SC_LINE_STIPPLE */ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ 0x00000000, /* VGT_HOS_CNTL */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 
0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* VGT_GS_MODE */ 0xc0016900, 0x000002a1, 0x00000000, /* VGT_PRIMITIVEID_EN */ 0xc0016900, 0x000002a5, 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */ 0xc0016900, 0x000002d5, 0x00000000, /* VGT_SHADER_STAGES_EN */ 0xc0026900, 0x000002e5, 0x00000000, /* VGT_STRMOUT_CONFIG */ 0x00000000, /* */ 0xc0016900, 0x000001e0, 0x00000000, /* CB_BLEND0_CONTROL */ 0xc0016900, 0x000001b1, 0x00000000, /* SPI_VS_OUT_CONFIG */ 0xc0016900, 0x00000187, 0x00000000, /* SPI_VS_OUT_ID_0 */ 0xc0016900, 0x00000191, 0x00000100, /* SPI_PS_INPUT_CNTL_0 */ 0xc00b6900, 0x000001b3, 0x20000001, /* SPI_PS_IN_CONTROL_0 */ 0x00000000, /* SPI_PS_IN_CONTROL_1 */ 0x00000000, /* SPI_INTERP_CONTROL_0 */ 0x00000000, /* SPI_INPUT_Z */ 0x00000000, /* SPI_FOG_CNTL */ 0x00100000, /* SPI_BARYC_CNTL */ 0x00000000, /* SPI_PS_IN_CONTROL_2 */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0x00000000, /* */ 0xc0026900, 0x00000316, 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 0x00000010, /* */ }; const u32 evergreen_vs[] = { 0x00000004, 0x80800400, 0x0000a03c, 0x95000688, 0x00004000, 0x15200688, 0x00000000, 0x00000000, 0x3c000000, 0x67961001, #ifdef __BIG_ENDIAN 0x000a0000, #else 0x00080000, #endif 0x00000000, 0x1c000000, 0x67961000, #ifdef __BIG_ENDIAN 0x00020008, #else 0x00000008, #endif 0x00000000, }; const u32 evergreen_ps[] = { 0x00000003, 0xa00c0000, 0x00000008, 0x80400000, 0x00000000, 0x95200688, 0x00380400, 0x00146b10, 0x00380000, 0x20146b10, 0x00380400, 0x40146b00, 0x80380000, 0x60146b00, 0x00000000, 0x00000000, 0x00000010, 0x000d1000, 0xb0800000, 0x00000000, }; const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps); const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs); const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
gpl-2.0
houst0nn/android_kernel_lge_galbi
arch/cris/mm/init.c
7583
2142
/* * linux/arch/cris/mm/init.c * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 2000,2001 Axis Communications AB * * Authors: Bjorn Wesen (bjornw@axis.com) * */ #include <linux/gfp.h> #include <linux/init.h> #include <linux/bootmem.h> #include <asm/tlb.h> unsigned long empty_zero_page; extern char _stext, _edata, _etext; /* From linkerscript */ extern char __init_begin, __init_end; void __init mem_init(void) { int codesize, reservedpages, datasize, initsize; unsigned long tmp; BUG_ON(!mem_map); /* max/min_low_pfn was set by setup.c * now we just copy it to some other necessary places... * * high_memory was also set in setup.c */ max_mapnr = num_physpages = max_low_pfn - min_low_pfn; /* this will put all memory onto the freelists */ totalram_pages = free_all_bootmem(); reservedpages = 0; for (tmp = 0; tmp < max_mapnr; tmp++) { /* * Only count reserved RAM pages */ if (PageReserved(mem_map + tmp)) reservedpages++; } codesize = (unsigned long) &_etext - (unsigned long) &_stext; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, " "%dk init)\n" , nr_free_pages() << (PAGE_SHIFT-10), max_mapnr << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10 ); } /* free the pages occupied by initialization code */ void free_initmem(void) { unsigned long addr; addr = (unsigned long)(&__init_begin); for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); free_page(addr); totalram_pages++; } printk (KERN_INFO "Freeing unused kernel memory: %luk freed\n", (unsigned long)((&__init_end - &__init_begin) >> 10)); }
gpl-2.0
TeamBliss-Devices/android_kernel_samsung_s3ve3g
drivers/staging/speakup/speakup_decext.c
7583
7240
/* * originally written by: Kirk Reiser <kirk@braille.uwo.ca> * this version considerably modified by David Borowski, david575@rogers.com * * Copyright (C) 1998-99 Kirk Reiser. * Copyright (C) 2003 David Borowski. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * specificly written as a driver for the speakup screenreview * s not a general device driver. 
*/ #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/kthread.h> #include "spk_priv.h" #include "serialio.h" #include "speakup.h" #define DRV_VERSION "2.14" #define SYNTH_CLEAR 0x03 #define PROCSPEECH 0x0b static unsigned char last_char; static inline u_char get_last_char(void) { u_char avail = inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR; if (avail) last_char = inb_p(speakup_info.port_tts + UART_RX); return last_char; } static inline bool synth_full(void) { return get_last_char() == 0x13; } static void do_catch_up(struct spk_synth *synth); static void synth_flush(struct spk_synth *synth); static int in_escape; static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 222]" } }, { CAPS_STOP, .u.s = {"[:dv ap 100]" } }, { RATE, .u.n = {"[:ra %d]", 7, 0, 9, 150, 25, NULL } }, { PITCH, .u.n = {"[:dv ap %d]", 100, 0, 100, 0, 0, NULL } }, { VOL, .u.n = {"[:dv gv %d]", 13, 0, 16, 0, 5, NULL } }, { PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } }, { VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, V_LAST_VAR }; /* * These attributes will appear in /sys/accessibility/speakup/decext. 
*/ static struct kobj_attribute caps_start_attribute = __ATTR(caps_start, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute caps_stop_attribute = __ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute pitch_attribute = __ATTR(pitch, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute punct_attribute = __ATTR(punct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute rate_attribute = __ATTR(rate, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute voice_attribute = __ATTR(voice, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute vol_attribute = __ATTR(vol, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute delay_time_attribute = __ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute direct_attribute = __ATTR(direct, USER_RW, spk_var_show, spk_var_store); static struct kobj_attribute full_time_attribute = __ATTR(full_time, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute jiffy_delta_attribute = __ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store); static struct kobj_attribute trigger_time_attribute = __ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store); /* * Create a group of attributes so that we can create and destroy them all * at once. 
*/ static struct attribute *synth_attrs[] = { &caps_start_attribute.attr, &caps_stop_attribute.attr, &pitch_attribute.attr, &punct_attribute.attr, &rate_attribute.attr, &voice_attribute.attr, &vol_attribute.attr, &delay_time_attribute.attr, &direct_attribute.attr, &full_time_attribute.attr, &jiffy_delta_attribute.attr, &trigger_time_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; static struct spk_synth synth_decext = { .name = "decext", .version = DRV_VERSION, .long_name = "Dectalk External", .init = "[:pe -380]", .procspeech = PROCSPEECH, .clear = SYNTH_CLEAR, .delay = 500, .trigger = 50, .jiffies = 50, .full = 40000, .flags = SF_DEC, .startup = SYNTH_START, .checkval = SYNTH_CHECK, .vars = vars, .probe = serial_synth_probe, .release = spk_serial_release, .synth_immediate = spk_synth_immediate, .catch_up = do_catch_up, .flush = synth_flush, .is_alive = spk_synth_is_alive_restart, .synth_adjust = NULL, .read_buff_add = NULL, .get_index = NULL, .indexing = { .command = NULL, .lowindex = 0, .highindex = 0, .currindex = 0, }, .attributes = { .attrs = synth_attrs, .name = "decext", }, }; static void do_catch_up(struct spk_synth *synth) { u_char ch; static u_char last = '\0'; unsigned long flags; unsigned long jiff_max; struct var_t *jiffy_delta; struct var_t *delay_time; int jiffy_delta_val = 0; int delay_time_val = 0; jiffy_delta = get_var(JIFFY); delay_time = get_var(DELAY); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; spk_unlock(flags); jiff_max = jiffies + jiffy_delta_val; while (!kthread_should_stop()) { spk_lock(flags); if (speakup_info.flushing) { speakup_info.flushing = 0; spk_unlock(flags); synth->flush(synth); continue; } if (synth_buffer_empty()) { spk_unlock(flags); break; } ch = synth_buffer_peek(); set_current_state(TASK_INTERRUPTIBLE); delay_time_val = delay_time->u.n.value; spk_unlock(flags); if (ch == '\n') ch = 0x0D; if (synth_full() || !spk_serial_out(ch)) { 
schedule_timeout(msecs_to_jiffies(delay_time_val)); continue; } set_current_state(TASK_RUNNING); spk_lock(flags); synth_buffer_getc(); spk_unlock(flags); if (ch == '[') in_escape = 1; else if (ch == ']') in_escape = 0; else if (ch <= SPACE) { if (!in_escape && strchr(",.!?;:", last)) spk_serial_out(PROCSPEECH); if (jiffies >= jiff_max) { if (!in_escape) spk_serial_out(PROCSPEECH); spk_lock(flags); jiffy_delta_val = jiffy_delta->u.n.value; delay_time_val = delay_time->u.n.value; spk_unlock(flags); schedule_timeout(msecs_to_jiffies (delay_time_val)); jiff_max = jiffies + jiffy_delta_val; } } last = ch; } if (!in_escape) spk_serial_out(PROCSPEECH); } static void synth_flush(struct spk_synth *synth) { in_escape = 0; spk_synth_immediate(synth, "\033P;10z\033\\"); } module_param_named(ser, synth_decext.ser, int, S_IRUGO); module_param_named(start, synth_decext.startup, short, S_IRUGO); MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based)."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); static int __init decext_init(void) { return synth_add(&synth_decext); } static void __exit decext_exit(void) { synth_remove(&synth_decext); } module_init(decext_init); module_exit(decext_exit); MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>"); MODULE_AUTHOR("David Borowski"); MODULE_DESCRIPTION("Speakup support for DECtalk External synthesizers"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
toyota86/xoompus
arch/mips/math-emu/sp_add.c
7839
4587
/* IEEE754 floating point arithmetic * single precision */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754sp.h" ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y) { COMPXSP; COMPYSP; EXPLODEXSP; EXPLODEYSP; CLEARCX; FLUSHXSP; FLUSHYSP; switch (CLPAIR(xc, yc)) { case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF): SETCX(IEEE754_INVALID_OPERATION); return ieee754sp_nanxcpt(ieee754sp_indef(), "add", x, y); case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN): return y; case 
CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM): case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF): return x; /* Infinity handling */ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF): if (xs == ys) return x; SETCX(IEEE754_INVALID_OPERATION); return ieee754sp_xcpt(ieee754sp_indef(), "add", x, y); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF): return y; case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM): return x; /* Zero handling */ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO): if (xs == ys) return x; else return ieee754sp_zero(ieee754_csr.rm == IEEE754_RD); case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO): case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO): return x; case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM): case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM): return y; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): SPDNORMX; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): SPDNORMY; break; case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM): SPDNORMX; break; case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM): break; } assert(xm & SP_HIDDEN_BIT); assert(ym & SP_HIDDEN_BIT); /* provide guard,round and stick bit space */ xm <<= 3; ym <<= 3; if (xe > ye) { /* have to shift y fraction right to align */ int s = xe - ye; SPXSRSYn(s); } else if (ye > xe) { /* have to shift x fraction right to align */ int s = ye - xe; SPXSRSXn(s); } assert(xe == ye); assert(xe <= SP_EMAX); if (xs == ys) { /* generate 28 bit result of adding two 27 bit numbers * leaving result in xm,xs,xe */ xm = xm + ym; xe = xe; xs = xs; if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */ SPXSRSX1(); 
} } else { if (xm >= ym) { xm = xm - ym; xe = xe; xs = xs; } else { xm = ym - xm; xe = xe; xs = ys; } if (xm == 0) return ieee754sp_zero(ieee754_csr.rm == IEEE754_RD); /* normalize in extended single precision */ while ((xm >> (SP_MBITS + 3)) == 0) { xm <<= 1; xe--; } } SPNORMRET2(xs, xe, xm, "add", x, y); }
gpl-2.0
dmore70/nexus7-kernel
arch/x86/mm/pat.c
8095
20187
/* * Handle caching attributes in page tables (PAT) * * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * Suresh B Siddha <suresh.b.siddha@intel.com> * * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. */ #include <linux/seq_file.h> #include <linux/bootmem.h> #include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/rbtree.h> #include <asm/cacheflush.h> #include <asm/processor.h> #include <asm/tlbflush.h> #include <asm/x86_init.h> #include <asm/pgtable.h> #include <asm/fcntl.h> #include <asm/e820.h> #include <asm/mtrr.h> #include <asm/page.h> #include <asm/msr.h> #include <asm/pat.h> #include <asm/io.h> #include "pat_internal.h" #ifdef CONFIG_X86_PAT int __read_mostly pat_enabled = 1; static inline void pat_disable(const char *reason) { pat_enabled = 0; printk(KERN_INFO "%s\n", reason); } static int __init nopat(char *str) { pat_disable("PAT support disabled."); return 0; } early_param("nopat", nopat); #else static inline void pat_disable(const char *reason) { (void)reason; } #endif int pat_debug_enable; static int __init pat_debug_setup(char *str) { pat_debug_enable = 1; return 0; } __setup("debugpat", pat_debug_setup); static u64 __read_mostly boot_pat_state; enum { PAT_UC = 0, /* uncached */ PAT_WC = 1, /* Write combining */ PAT_WT = 4, /* Write Through */ PAT_WP = 5, /* Write Protected */ PAT_WB = 6, /* Write Back (default) */ PAT_UC_MINUS = 7, /* UC, but can be overriden by MTRR */ }; #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) void pat_init(void) { u64 pat; bool boot_cpu = !boot_pat_state; if (!pat_enabled) return; if (!cpu_has_pat) { if (!boot_pat_state) { pat_disable("PAT not supported by CPU."); return; } else { /* * If this happens we are on a secondary CPU, but * switched to PAT on the boot CPU. We have no way to * undo PAT. 
*/ printk(KERN_ERR "PAT enabled, " "but not supported by secondary CPU\n"); BUG(); } } /* Set PWT to Write-Combining. All other bits stay the same */ /* * PTE encoding used in Linux: * PAT * |PCD * ||PWT * ||| * 000 WB _PAGE_CACHE_WB * 001 WC _PAGE_CACHE_WC * 010 UC- _PAGE_CACHE_UC_MINUS * 011 UC _PAGE_CACHE_UC * PAT bit unused */ pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) | PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC); /* Boot CPU check */ if (!boot_pat_state) rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); wrmsrl(MSR_IA32_CR_PAT, pat); if (boot_cpu) printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", smp_processor_id(), boot_pat_state, pat); } #undef PAT static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */ /* * Does intersection of PAT memory type and MTRR memory type and returns * the resulting memory type as PAT understands it. * (Type in pat and mtrr will not have same value) * The intersection is based on "Effective Memory Type" tables in IA-32 * SDM vol 3a */ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type) { /* * Look for MTRR hint to get the effective type in case where PAT * request is for WB. */ if (req_type == _PAGE_CACHE_WB) { u8 mtrr_type; mtrr_type = mtrr_type_lookup(start, end); if (mtrr_type != MTRR_TYPE_WRBACK) return _PAGE_CACHE_UC_MINUS; return _PAGE_CACHE_WB; } return req_type; } static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end) { int ram_page = 0, not_rampage = 0; unsigned long page_nr; for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT); ++page_nr) { /* * For legacy reasons, physical address range in the legacy ISA * region is tracked as non-RAM. This will allow users of * /dev/mem to map portions of legacy ISA region, even when * some of those portions are listed(or not even listed) with * different e820 types(RAM/reserved/..) 
*/ if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) && page_is_ram(page_nr)) ram_page = 1; else not_rampage = 1; if (ram_page == not_rampage) return -1; } return ram_page; } /* * For RAM pages, we use page flags to mark the pages with appropriate type. * Here we do two pass: * - Find the memtype of all the pages in the range, look for any conflicts * - In case of no conflicts, set the new memtype for pages in the range */ static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type, unsigned long *new_type) { struct page *page; u64 pfn; if (req_type == _PAGE_CACHE_UC) { /* We do not support strong UC */ WARN_ON_ONCE(1); req_type = _PAGE_CACHE_UC_MINUS; } for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { unsigned long type; page = pfn_to_page(pfn); type = get_page_memtype(page); if (type != -1) { printk(KERN_INFO "reserve_ram_pages_type failed " "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n", start, end, type, req_type); if (new_type) *new_type = type; return -EBUSY; } } if (new_type) *new_type = req_type; for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { page = pfn_to_page(pfn); set_page_memtype(page, req_type); } return 0; } static int free_ram_pages_type(u64 start, u64 end) { struct page *page; u64 pfn; for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { page = pfn_to_page(pfn); set_page_memtype(page, -1); } return 0; } /* * req_type typically has one of the: * - _PAGE_CACHE_WB * - _PAGE_CACHE_WC * - _PAGE_CACHE_UC_MINUS * - _PAGE_CACHE_UC * * If new_type is NULL, function will return an error if it cannot reserve the * region with req_type. If new_type is non-NULL, function will return * available type in new_type in case of no error. In case of any error * it will return a negative return value. 
*/ int reserve_memtype(u64 start, u64 end, unsigned long req_type, unsigned long *new_type) { struct memtype *new; unsigned long actual_type; int is_range_ram; int err = 0; BUG_ON(start >= end); /* end is exclusive */ if (!pat_enabled) { /* This is identical to page table setting without PAT */ if (new_type) { if (req_type == _PAGE_CACHE_WC) *new_type = _PAGE_CACHE_UC_MINUS; else *new_type = req_type & _PAGE_CACHE_MASK; } return 0; } /* Low ISA region is always mapped WB in page table. No need to track */ if (x86_platform.is_untracked_pat_range(start, end)) { if (new_type) *new_type = _PAGE_CACHE_WB; return 0; } /* * Call mtrr_lookup to get the type hint. This is an * optimization for /dev/mem mmap'ers into WB memory (BIOS * tools and ACPI tools). Use WB request for WB memory and use * UC_MINUS otherwise. */ actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK); if (new_type) *new_type = actual_type; is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) { err = reserve_ram_pages_type(start, end, req_type, new_type); return err; } else if (is_range_ram < 0) { return -EINVAL; } new = kzalloc(sizeof(struct memtype), GFP_KERNEL); if (!new) return -ENOMEM; new->start = start; new->end = end; new->type = actual_type; spin_lock(&memtype_lock); err = rbt_memtype_check_insert(new, new_type); if (err) { printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, " "track %s, req %s\n", start, end, cattr_name(new->type), cattr_name(req_type)); kfree(new); spin_unlock(&memtype_lock); return err; } spin_unlock(&memtype_lock); dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", start, end, cattr_name(new->type), cattr_name(req_type), new_type ? cattr_name(*new_type) : "-"); return err; } int free_memtype(u64 start, u64 end) { int err = -EINVAL; int is_range_ram; struct memtype *entry; if (!pat_enabled) return 0; /* Low ISA region is always mapped WB. 
No need to track */ if (x86_platform.is_untracked_pat_range(start, end)) return 0; is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) { err = free_ram_pages_type(start, end); return err; } else if (is_range_ram < 0) { return -EINVAL; } spin_lock(&memtype_lock); entry = rbt_memtype_erase(start, end); spin_unlock(&memtype_lock); if (!entry) { printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", current->comm, current->pid, start, end); return -EINVAL; } kfree(entry); dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); return 0; } /** * lookup_memtype - Looksup the memory type for a physical address * @paddr: physical address of which memory type needs to be looked up * * Only to be called when PAT is enabled * * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or * _PAGE_CACHE_UC */ static unsigned long lookup_memtype(u64 paddr) { int rettype = _PAGE_CACHE_WB; struct memtype *entry; if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE)) return rettype; if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) { struct page *page; page = pfn_to_page(paddr >> PAGE_SHIFT); rettype = get_page_memtype(page); /* * -1 from get_page_memtype() implies RAM page is in its * default state and not reserved, and hence of type WB */ if (rettype == -1) rettype = _PAGE_CACHE_WB; return rettype; } spin_lock(&memtype_lock); entry = rbt_memtype_lookup(paddr); if (entry != NULL) rettype = entry->type; else rettype = _PAGE_CACHE_UC_MINUS; spin_unlock(&memtype_lock); return rettype; } /** * io_reserve_memtype - Request a memory type mapping for a region of memory * @start: start (physical address) of the region * @end: end (physical address) of the region * @type: A pointer to memtype, with requested type. 
On success, requested * or any other compatible type that was available for the region is returned * * On success, returns 0 * On failure, returns non-zero */ int io_reserve_memtype(resource_size_t start, resource_size_t end, unsigned long *type) { resource_size_t size = end - start; unsigned long req_type = *type; unsigned long new_type; int ret; WARN_ON_ONCE(iomem_map_sanity_check(start, size)); ret = reserve_memtype(start, end, req_type, &new_type); if (ret) goto out_err; if (!is_new_memtype_allowed(start, size, req_type, new_type)) goto out_free; if (kernel_map_sync_memtype(start, size, new_type) < 0) goto out_free; *type = new_type; return 0; out_free: free_memtype(start, end); ret = -EBUSY; out_err: return ret; } /** * io_free_memtype - Release a memory type mapping for a region of memory * @start: start (physical address) of the region * @end: end (physical address) of the region */ void io_free_memtype(resource_size_t start, resource_size_t end) { free_memtype(start, end); } pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { return vma_prot; } #ifdef CONFIG_STRICT_DEVMEM /* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM*/ static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; } #else /* This check is needed to avoid cache aliasing when PAT is enabled */ static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; u64 to = from + size; u64 cursor = from; if (!pat_enabled) return 1; while (cursor < to) { if (!devmem_is_allowed(pfn)) { printk(KERN_INFO "Program %s tried to access /dev/mem between %Lx->%Lx.\n", current->comm, from, to); return 0; } cursor += PAGE_SIZE; pfn++; } return 1; } #endif /* CONFIG_STRICT_DEVMEM */ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { unsigned long flags = _PAGE_CACHE_WB; if (!range_is_allowed(pfn, size)) 
return 0; if (file->f_flags & O_DSYNC) flags = _PAGE_CACHE_UC_MINUS; #ifdef CONFIG_X86_32 /* * On the PPro and successors, the MTRRs are used to set * memory types for physical addresses outside main memory, * so blindly setting UC or PWT on those pages is wrong. * For Pentiums and earlier, the surround logic should disable * caching for the high addresses through the KEN pin, but * we maintain the tradition of paranoia in this code. */ if (!pat_enabled && !(boot_cpu_has(X86_FEATURE_MTRR) || boot_cpu_has(X86_FEATURE_K6_MTRR) || boot_cpu_has(X86_FEATURE_CYRIX_ARR) || boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) && (pfn << PAGE_SHIFT) >= __pa(high_memory)) { flags = _PAGE_CACHE_UC; } #endif *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | flags); return 1; } /* * Change the memory type for the physial address range in kernel identity * mapping space if that range is a part of identity map. */ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags) { unsigned long id_sz; if (base >= __pa(high_memory)) return 0; id_sz = (__pa(high_memory) < base + size) ? __pa(high_memory) - base : size; if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " "for %Lx-%Lx\n", current->comm, current->pid, cattr_name(flags), base, (unsigned long long)(base + size)); return -EINVAL; } return 0; } /* * Internal interface to reserve a range of physical memory with prot. * Reserved non RAM regions only and after successful reserve_memtype, * this func also keeps identity mapping (if any) in sync with this new prot. */ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, int strict_prot) { int is_ram = 0; int ret; unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK); unsigned long flags = want_flags; is_ram = pat_pagerange_is_ram(paddr, paddr + size); /* * reserve_pfn_range() for RAM pages. 
We do not refcount to keep * track of number of mappings of RAM pages. We can assert that * the type requested matches the type of first page in the range. */ if (is_ram) { if (!pat_enabled) return 0; flags = lookup_memtype(paddr); if (want_flags != flags) { printk(KERN_WARNING "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n", current->comm, current->pid, cattr_name(want_flags), (unsigned long long)paddr, (unsigned long long)(paddr + size), cattr_name(flags)); *vma_prot = __pgprot((pgprot_val(*vma_prot) & (~_PAGE_CACHE_MASK)) | flags); } return 0; } ret = reserve_memtype(paddr, paddr + size, want_flags, &flags); if (ret) return ret; if (flags != want_flags) { if (strict_prot || !is_new_memtype_allowed(paddr, size, want_flags, flags)) { free_memtype(paddr, paddr + size); printk(KERN_ERR "%s:%d map pfn expected mapping type %s" " for %Lx-%Lx, got %s\n", current->comm, current->pid, cattr_name(want_flags), (unsigned long long)paddr, (unsigned long long)(paddr + size), cattr_name(flags)); return -EINVAL; } /* * We allow returning different type than the one requested in * non strict case. */ *vma_prot = __pgprot((pgprot_val(*vma_prot) & (~_PAGE_CACHE_MASK)) | flags); } if (kernel_map_sync_memtype(paddr, size, flags) < 0) { free_memtype(paddr, paddr + size); return -EINVAL; } return 0; } /* * Internal interface to free a range of physical memory. * Frees non RAM regions only. */ static void free_pfn_range(u64 paddr, unsigned long size) { int is_ram; is_ram = pat_pagerange_is_ram(paddr, paddr + size); if (is_ram == 0) free_memtype(paddr, paddr + size); } /* * track_pfn_vma_copy is called when vma that is covering the pfnmap gets * copied through copy_page_range(). * * If the vma has a linear pfn mapping for the entire range, we get the prot * from pte and reserve the entire vma range with single reserve_pfn_range call. 
*/ int track_pfn_vma_copy(struct vm_area_struct *vma) { resource_size_t paddr; unsigned long prot; unsigned long vma_size = vma->vm_end - vma->vm_start; pgprot_t pgprot; if (is_linear_pfn_mapping(vma)) { /* * reserve the whole chunk covered by vma. We need the * starting address and protection from pte. */ if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { WARN_ON_ONCE(1); return -EINVAL; } pgprot = __pgprot(prot); return reserve_pfn_range(paddr, vma_size, &pgprot, 1); } return 0; } /* * track_pfn_vma_new is called when a _new_ pfn mapping is being established * for physical range indicated by pfn and size. * * prot is passed in as a parameter for the new mapping. If the vma has a * linear pfn mapping for the entire range reserve the entire vma range with * single reserve_pfn_range call. */ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long size) { unsigned long flags; resource_size_t paddr; unsigned long vma_size = vma->vm_end - vma->vm_start; if (is_linear_pfn_mapping(vma)) { /* reserve the whole chunk starting from vm_pgoff */ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; return reserve_pfn_range(paddr, vma_size, prot, 0); } if (!pat_enabled) return 0; /* for vm_insert_pfn and friends, we set prot based on lookup */ flags = lookup_memtype(pfn << PAGE_SHIFT); *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) | flags); return 0; } /* * untrack_pfn_vma is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or * can be for the entire vma (in which case size can be zero). 
*/ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, unsigned long size) { resource_size_t paddr; unsigned long vma_size = vma->vm_end - vma->vm_start; if (is_linear_pfn_mapping(vma)) { /* free the whole chunk starting from vm_pgoff */ paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; free_pfn_range(paddr, vma_size); return; } } pgprot_t pgprot_writecombine(pgprot_t prot) { if (pat_enabled) return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC); else return pgprot_noncached(prot); } EXPORT_SYMBOL_GPL(pgprot_writecombine); #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) static struct memtype *memtype_get_idx(loff_t pos) { struct memtype *print_entry; int ret; print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL); if (!print_entry) return NULL; spin_lock(&memtype_lock); ret = rbt_memtype_copy_nth_element(print_entry, pos); spin_unlock(&memtype_lock); if (!ret) { return print_entry; } else { kfree(print_entry); return NULL; } } static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos == 0) { ++*pos; seq_printf(seq, "PAT memtype list:\n"); } return memtype_get_idx(*pos); } static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return memtype_get_idx(*pos); } static void memtype_seq_stop(struct seq_file *seq, void *v) { } static int memtype_seq_show(struct seq_file *seq, void *v) { struct memtype *print_entry = (struct memtype *)v; seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type), print_entry->start, print_entry->end); kfree(print_entry); return 0; } static const struct seq_operations memtype_seq_ops = { .start = memtype_seq_start, .next = memtype_seq_next, .stop = memtype_seq_stop, .show = memtype_seq_show, }; static int memtype_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &memtype_seq_ops); } static const struct file_operations memtype_fops = { .open = memtype_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static 
int __init pat_memtype_list_init(void) { if (pat_enabled) { debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir, NULL, &memtype_fops); } return 0; } late_initcall(pat_memtype_list_init); #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
gpl-2.0
dirtyredmi2/kernel
arch/powerpc/platforms/cell/celleb_scc_epci.c
8351
10036
/* * Support for SCC external PCI * * (C) Copyright 2004-2007 TOSHIBA CORPORATION * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #undef DEBUG #include <linux/kernel.h> #include <linux/threads.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/pci_regs.h> #include <linux/bootmem.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include "celleb_scc.h" #include "celleb_pci.h" #define MAX_PCI_DEVICES 32 #define MAX_PCI_FUNCTIONS 8 #define iob() __asm__ __volatile__("eieio; sync":::"memory") static inline PCI_IO_ADDR celleb_epci_get_epci_base( struct pci_controller *hose) { /* * Note: * Celleb epci uses cfg_addr as a base address for * epci control registers. */ return hose->cfg_addr; } static inline PCI_IO_ADDR celleb_epci_get_epci_cfg( struct pci_controller *hose) { /* * Note: * Celleb epci uses cfg_data as a base address for * configuration area for epci devices. 
*/ return hose->cfg_data; } static inline void clear_and_disable_master_abort_interrupt( struct pci_controller *hose) { PCI_IO_ADDR epci_base; PCI_IO_ADDR reg; epci_base = celleb_epci_get_epci_base(hose); reg = epci_base + PCI_COMMAND; out_be32(reg, in_be32(reg) | (PCI_STATUS_REC_MASTER_ABORT << 16)); } static int celleb_epci_check_abort(struct pci_controller *hose, PCI_IO_ADDR addr) { PCI_IO_ADDR reg; PCI_IO_ADDR epci_base; u32 val; iob(); epci_base = celleb_epci_get_epci_base(hose); reg = epci_base + PCI_COMMAND; val = in_be32(reg); if (val & (PCI_STATUS_REC_MASTER_ABORT << 16)) { out_be32(reg, (val & 0xffff) | (PCI_STATUS_REC_MASTER_ABORT << 16)); /* clear PCI Controller error, FRE, PMFE */ reg = epci_base + SCC_EPCI_STATUS; out_be32(reg, SCC_EPCI_INT_PAI); reg = epci_base + SCC_EPCI_VCSR; val = in_be32(reg) & 0xffff; val |= SCC_EPCI_VCSR_FRE; out_be32(reg, val); reg = epci_base + SCC_EPCI_VISTAT; out_be32(reg, SCC_EPCI_VISTAT_PMFE); return PCIBIOS_DEVICE_NOT_FOUND; } return PCIBIOS_SUCCESSFUL; } static PCI_IO_ADDR celleb_epci_make_config_addr(struct pci_bus *bus, struct pci_controller *hose, unsigned int devfn, int where) { PCI_IO_ADDR addr; if (bus != hose->bus) addr = celleb_epci_get_epci_cfg(hose) + (((bus->number & 0xff) << 16) | ((devfn & 0xff) << 8) | (where & 0xff) | 0x01000000); else addr = celleb_epci_get_epci_cfg(hose) + (((devfn & 0xff) << 8) | (where & 0xff)); pr_debug("EPCI: config_addr = 0x%p\n", addr); return addr; } static int celleb_epci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { PCI_IO_ADDR epci_base; PCI_IO_ADDR addr; struct pci_controller *hose = pci_bus_to_host(bus); /* allignment check */ BUG_ON(where % size); if (!celleb_epci_get_epci_cfg(hose)) return PCIBIOS_DEVICE_NOT_FOUND; if (bus->number == hose->first_busno && devfn == 0) { /* EPCI controller self */ epci_base = celleb_epci_get_epci_base(hose); addr = epci_base + where; switch (size) { case 1: *val = in_8(addr); break; case 2: *val = 
in_be16(addr); break; case 4: *val = in_be32(addr); break; default: return PCIBIOS_DEVICE_NOT_FOUND; } } else { clear_and_disable_master_abort_interrupt(hose); addr = celleb_epci_make_config_addr(bus, hose, devfn, where); switch (size) { case 1: *val = in_8(addr); break; case 2: *val = in_le16(addr); break; case 4: *val = in_le32(addr); break; default: return PCIBIOS_DEVICE_NOT_FOUND; } } pr_debug("EPCI: " "addr=0x%p, devfn=0x%x, where=0x%x, size=0x%x, val=0x%x\n", addr, devfn, where, size, *val); return celleb_epci_check_abort(hose, NULL); } static int celleb_epci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { PCI_IO_ADDR epci_base; PCI_IO_ADDR addr; struct pci_controller *hose = pci_bus_to_host(bus); /* allignment check */ BUG_ON(where % size); if (!celleb_epci_get_epci_cfg(hose)) return PCIBIOS_DEVICE_NOT_FOUND; if (bus->number == hose->first_busno && devfn == 0) { /* EPCI controller self */ epci_base = celleb_epci_get_epci_base(hose); addr = epci_base + where; switch (size) { case 1: out_8(addr, val); break; case 2: out_be16(addr, val); break; case 4: out_be32(addr, val); break; default: return PCIBIOS_DEVICE_NOT_FOUND; } } else { clear_and_disable_master_abort_interrupt(hose); addr = celleb_epci_make_config_addr(bus, hose, devfn, where); switch (size) { case 1: out_8(addr, val); break; case 2: out_le16(addr, val); break; case 4: out_le32(addr, val); break; default: return PCIBIOS_DEVICE_NOT_FOUND; } } return celleb_epci_check_abort(hose, addr); } struct pci_ops celleb_epci_ops = { .read = celleb_epci_read_config, .write = celleb_epci_write_config, }; /* to be moved in FW */ static int __init celleb_epci_init(struct pci_controller *hose) { u32 val; PCI_IO_ADDR reg; PCI_IO_ADDR epci_base; int hwres = 0; epci_base = celleb_epci_get_epci_base(hose); /* PCI core reset(Internal bus and PCI clock) */ reg = epci_base + SCC_EPCI_CKCTRL; val = in_be32(reg); if (val == 0x00030101) hwres = 1; else { val &= ~(SCC_EPCI_CKCTRL_CRST0 | 
SCC_EPCI_CKCTRL_CRST1); out_be32(reg, val); /* set PCI core clock */ val = in_be32(reg); val |= (SCC_EPCI_CKCTRL_OCLKEN | SCC_EPCI_CKCTRL_LCLKEN); out_be32(reg, val); /* release PCI core reset (internal bus) */ val = in_be32(reg); val |= SCC_EPCI_CKCTRL_CRST0; out_be32(reg, val); /* set PCI clock select */ reg = epci_base + SCC_EPCI_CLKRST; val = in_be32(reg); val &= ~SCC_EPCI_CLKRST_CKS_MASK; val |= SCC_EPCI_CLKRST_CKS_2; out_be32(reg, val); /* set arbiter */ reg = epci_base + SCC_EPCI_ABTSET; out_be32(reg, 0x0f1f001f); /* temporary value */ /* buffer on */ reg = epci_base + SCC_EPCI_CLKRST; val = in_be32(reg); val |= SCC_EPCI_CLKRST_BC; out_be32(reg, val); /* PCI clock enable */ val = in_be32(reg); val |= SCC_EPCI_CLKRST_PCKEN; out_be32(reg, val); /* release PCI core reset (all) */ reg = epci_base + SCC_EPCI_CKCTRL; val = in_be32(reg); val |= (SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1); out_be32(reg, val); /* set base translation registers. (already set by Beat) */ /* set base address masks. (already set by Beat) */ } /* release interrupt masks and clear all interrupts */ reg = epci_base + SCC_EPCI_INTSET; out_be32(reg, 0x013f011f); /* all interrupts enable */ reg = epci_base + SCC_EPCI_VIENAB; val = SCC_EPCI_VIENAB_PMPEE | SCC_EPCI_VIENAB_PMFEE; out_be32(reg, val); reg = epci_base + SCC_EPCI_STATUS; out_be32(reg, 0xffffffff); reg = epci_base + SCC_EPCI_VISTAT; out_be32(reg, 0xffffffff); /* disable PCI->IB address translation */ reg = epci_base + SCC_EPCI_VCSR; val = in_be32(reg); val &= ~(SCC_EPCI_VCSR_DR | SCC_EPCI_VCSR_AT); out_be32(reg, val); /* set base addresses. (no need to set?) 
*/ /* memory space, bus master enable */ reg = epci_base + PCI_COMMAND; val = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; out_be32(reg, val); /* endian mode setup */ reg = epci_base + SCC_EPCI_ECMODE; val = 0x00550155; out_be32(reg, val); /* set control option */ reg = epci_base + SCC_EPCI_CNTOPT; val = in_be32(reg); val |= SCC_EPCI_CNTOPT_O2PMB; out_be32(reg, val); /* XXX: temporay: set registers for address conversion setup */ reg = epci_base + SCC_EPCI_CNF10_REG; out_be32(reg, 0x80000008); reg = epci_base + SCC_EPCI_CNF14_REG; out_be32(reg, 0x40000008); reg = epci_base + SCC_EPCI_BAM0; out_be32(reg, 0x80000000); reg = epci_base + SCC_EPCI_BAM1; out_be32(reg, 0xe0000000); reg = epci_base + SCC_EPCI_PVBAT; out_be32(reg, 0x80000000); if (!hwres) { /* release external PCI reset */ reg = epci_base + SCC_EPCI_CLKRST; val = in_be32(reg); val |= SCC_EPCI_CLKRST_PCIRST; out_be32(reg, val); } return 0; } static int __init celleb_setup_epci(struct device_node *node, struct pci_controller *hose) { struct resource r; pr_debug("PCI: celleb_setup_epci()\n"); /* * Note: * Celleb epci uses cfg_addr and cfg_data member of * pci_controller structure in irregular way. * * cfg_addr is used to map for control registers of * celleb epci. * * cfg_data is used for configuration area of devices * on Celleb epci buses. 
*/ if (of_address_to_resource(node, 0, &r)) goto error; hose->cfg_addr = ioremap(r.start, resource_size(&r)); if (!hose->cfg_addr) goto error; pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n", r.start, (unsigned long)hose->cfg_addr, resource_size(&r)); if (of_address_to_resource(node, 2, &r)) goto error; hose->cfg_data = ioremap(r.start, resource_size(&r)); if (!hose->cfg_data) goto error; pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n", r.start, (unsigned long)hose->cfg_data, resource_size(&r)); hose->ops = &celleb_epci_ops; celleb_epci_init(hose); return 0; error: if (hose->cfg_addr) iounmap(hose->cfg_addr); if (hose->cfg_data) iounmap(hose->cfg_data); return 1; } struct celleb_phb_spec celleb_epci_spec __initdata = { .setup = celleb_setup_epci, .ops = &spiderpci_ops, .iowa_init = &spiderpci_iowa_init, .iowa_data = (void *)0, };
gpl-2.0
teemodk/android_kernel_htc_endeavoru
fs/cifs/nterr.c
9887
34297
/* * Unix SMB/Netbios implementation. * Version 1.9. * RPC Pipe client / server routines * Copyright (C) Luke Kenneth Casson Leighton 1997-2001. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* NT error codes - see nterr.h */ #include <linux/types.h> #include <linux/fs.h> #include "nterr.h" const struct nt_err_code_struct nt_errs[] = { {"NT_STATUS_OK", NT_STATUS_OK}, {"NT_STATUS_UNSUCCESSFUL", NT_STATUS_UNSUCCESSFUL}, {"NT_STATUS_NOT_IMPLEMENTED", NT_STATUS_NOT_IMPLEMENTED}, {"NT_STATUS_INVALID_INFO_CLASS", NT_STATUS_INVALID_INFO_CLASS}, {"NT_STATUS_INFO_LENGTH_MISMATCH", NT_STATUS_INFO_LENGTH_MISMATCH}, {"NT_STATUS_ACCESS_VIOLATION", NT_STATUS_ACCESS_VIOLATION}, {"STATUS_BUFFER_OVERFLOW", STATUS_BUFFER_OVERFLOW}, {"NT_STATUS_IN_PAGE_ERROR", NT_STATUS_IN_PAGE_ERROR}, {"NT_STATUS_PAGEFILE_QUOTA", NT_STATUS_PAGEFILE_QUOTA}, {"NT_STATUS_INVALID_HANDLE", NT_STATUS_INVALID_HANDLE}, {"NT_STATUS_BAD_INITIAL_STACK", NT_STATUS_BAD_INITIAL_STACK}, {"NT_STATUS_BAD_INITIAL_PC", NT_STATUS_BAD_INITIAL_PC}, {"NT_STATUS_INVALID_CID", NT_STATUS_INVALID_CID}, {"NT_STATUS_TIMER_NOT_CANCELED", NT_STATUS_TIMER_NOT_CANCELED}, {"NT_STATUS_INVALID_PARAMETER", NT_STATUS_INVALID_PARAMETER}, {"NT_STATUS_NO_SUCH_DEVICE", NT_STATUS_NO_SUCH_DEVICE}, {"NT_STATUS_NO_SUCH_FILE", NT_STATUS_NO_SUCH_FILE}, {"NT_STATUS_INVALID_DEVICE_REQUEST", 
NT_STATUS_INVALID_DEVICE_REQUEST}, {"NT_STATUS_END_OF_FILE", NT_STATUS_END_OF_FILE}, {"NT_STATUS_WRONG_VOLUME", NT_STATUS_WRONG_VOLUME}, {"NT_STATUS_NO_MEDIA_IN_DEVICE", NT_STATUS_NO_MEDIA_IN_DEVICE}, {"NT_STATUS_UNRECOGNIZED_MEDIA", NT_STATUS_UNRECOGNIZED_MEDIA}, {"NT_STATUS_NONEXISTENT_SECTOR", NT_STATUS_NONEXISTENT_SECTOR}, {"NT_STATUS_MORE_PROCESSING_REQUIRED", NT_STATUS_MORE_PROCESSING_REQUIRED}, {"NT_STATUS_NO_MEMORY", NT_STATUS_NO_MEMORY}, {"NT_STATUS_CONFLICTING_ADDRESSES", NT_STATUS_CONFLICTING_ADDRESSES}, {"NT_STATUS_NOT_MAPPED_VIEW", NT_STATUS_NOT_MAPPED_VIEW}, {"NT_STATUS_UNABLE_TO_FREE_VM", NT_STATUS_UNABLE_TO_FREE_VM}, {"NT_STATUS_UNABLE_TO_DELETE_SECTION", NT_STATUS_UNABLE_TO_DELETE_SECTION}, {"NT_STATUS_INVALID_SYSTEM_SERVICE", NT_STATUS_INVALID_SYSTEM_SERVICE}, {"NT_STATUS_ILLEGAL_INSTRUCTION", NT_STATUS_ILLEGAL_INSTRUCTION}, {"NT_STATUS_INVALID_LOCK_SEQUENCE", NT_STATUS_INVALID_LOCK_SEQUENCE}, {"NT_STATUS_INVALID_VIEW_SIZE", NT_STATUS_INVALID_VIEW_SIZE}, {"NT_STATUS_INVALID_FILE_FOR_SECTION", NT_STATUS_INVALID_FILE_FOR_SECTION}, {"NT_STATUS_ALREADY_COMMITTED", NT_STATUS_ALREADY_COMMITTED}, {"NT_STATUS_ACCESS_DENIED", NT_STATUS_ACCESS_DENIED}, {"NT_STATUS_BUFFER_TOO_SMALL", NT_STATUS_BUFFER_TOO_SMALL}, {"NT_STATUS_OBJECT_TYPE_MISMATCH", NT_STATUS_OBJECT_TYPE_MISMATCH}, {"NT_STATUS_NONCONTINUABLE_EXCEPTION", NT_STATUS_NONCONTINUABLE_EXCEPTION}, {"NT_STATUS_INVALID_DISPOSITION", NT_STATUS_INVALID_DISPOSITION}, {"NT_STATUS_UNWIND", NT_STATUS_UNWIND}, {"NT_STATUS_BAD_STACK", NT_STATUS_BAD_STACK}, {"NT_STATUS_INVALID_UNWIND_TARGET", NT_STATUS_INVALID_UNWIND_TARGET}, {"NT_STATUS_NOT_LOCKED", NT_STATUS_NOT_LOCKED}, {"NT_STATUS_PARITY_ERROR", NT_STATUS_PARITY_ERROR}, {"NT_STATUS_UNABLE_TO_DECOMMIT_VM", NT_STATUS_UNABLE_TO_DECOMMIT_VM}, {"NT_STATUS_NOT_COMMITTED", NT_STATUS_NOT_COMMITTED}, {"NT_STATUS_INVALID_PORT_ATTRIBUTES", NT_STATUS_INVALID_PORT_ATTRIBUTES}, {"NT_STATUS_PORT_MESSAGE_TOO_LONG", NT_STATUS_PORT_MESSAGE_TOO_LONG}, 
{"NT_STATUS_INVALID_PARAMETER_MIX", NT_STATUS_INVALID_PARAMETER_MIX}, {"NT_STATUS_INVALID_QUOTA_LOWER", NT_STATUS_INVALID_QUOTA_LOWER}, {"NT_STATUS_DISK_CORRUPT_ERROR", NT_STATUS_DISK_CORRUPT_ERROR}, {"NT_STATUS_OBJECT_NAME_INVALID", NT_STATUS_OBJECT_NAME_INVALID}, {"NT_STATUS_OBJECT_NAME_NOT_FOUND", NT_STATUS_OBJECT_NAME_NOT_FOUND}, {"NT_STATUS_OBJECT_NAME_COLLISION", NT_STATUS_OBJECT_NAME_COLLISION}, {"NT_STATUS_HANDLE_NOT_WAITABLE", NT_STATUS_HANDLE_NOT_WAITABLE}, {"NT_STATUS_PORT_DISCONNECTED", NT_STATUS_PORT_DISCONNECTED}, {"NT_STATUS_DEVICE_ALREADY_ATTACHED", NT_STATUS_DEVICE_ALREADY_ATTACHED}, {"NT_STATUS_OBJECT_PATH_INVALID", NT_STATUS_OBJECT_PATH_INVALID}, {"NT_STATUS_OBJECT_PATH_NOT_FOUND", NT_STATUS_OBJECT_PATH_NOT_FOUND}, {"NT_STATUS_OBJECT_PATH_SYNTAX_BAD", NT_STATUS_OBJECT_PATH_SYNTAX_BAD}, {"NT_STATUS_DATA_OVERRUN", NT_STATUS_DATA_OVERRUN}, {"NT_STATUS_DATA_LATE_ERROR", NT_STATUS_DATA_LATE_ERROR}, {"NT_STATUS_DATA_ERROR", NT_STATUS_DATA_ERROR}, {"NT_STATUS_CRC_ERROR", NT_STATUS_CRC_ERROR}, {"NT_STATUS_SECTION_TOO_BIG", NT_STATUS_SECTION_TOO_BIG}, {"NT_STATUS_PORT_CONNECTION_REFUSED", NT_STATUS_PORT_CONNECTION_REFUSED}, {"NT_STATUS_INVALID_PORT_HANDLE", NT_STATUS_INVALID_PORT_HANDLE}, {"NT_STATUS_SHARING_VIOLATION", NT_STATUS_SHARING_VIOLATION}, {"NT_STATUS_QUOTA_EXCEEDED", NT_STATUS_QUOTA_EXCEEDED}, {"NT_STATUS_INVALID_PAGE_PROTECTION", NT_STATUS_INVALID_PAGE_PROTECTION}, {"NT_STATUS_MUTANT_NOT_OWNED", NT_STATUS_MUTANT_NOT_OWNED}, {"NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED", NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED}, {"NT_STATUS_PORT_ALREADY_SET", NT_STATUS_PORT_ALREADY_SET}, {"NT_STATUS_SECTION_NOT_IMAGE", NT_STATUS_SECTION_NOT_IMAGE}, {"NT_STATUS_SUSPEND_COUNT_EXCEEDED", NT_STATUS_SUSPEND_COUNT_EXCEEDED}, {"NT_STATUS_THREAD_IS_TERMINATING", NT_STATUS_THREAD_IS_TERMINATING}, {"NT_STATUS_BAD_WORKING_SET_LIMIT", NT_STATUS_BAD_WORKING_SET_LIMIT}, {"NT_STATUS_INCOMPATIBLE_FILE_MAP", NT_STATUS_INCOMPATIBLE_FILE_MAP}, {"NT_STATUS_SECTION_PROTECTION", 
NT_STATUS_SECTION_PROTECTION}, {"NT_STATUS_EAS_NOT_SUPPORTED", NT_STATUS_EAS_NOT_SUPPORTED}, {"NT_STATUS_EA_TOO_LARGE", NT_STATUS_EA_TOO_LARGE}, {"NT_STATUS_NONEXISTENT_EA_ENTRY", NT_STATUS_NONEXISTENT_EA_ENTRY}, {"NT_STATUS_NO_EAS_ON_FILE", NT_STATUS_NO_EAS_ON_FILE}, {"NT_STATUS_EA_CORRUPT_ERROR", NT_STATUS_EA_CORRUPT_ERROR}, {"NT_STATUS_FILE_LOCK_CONFLICT", NT_STATUS_FILE_LOCK_CONFLICT}, {"NT_STATUS_LOCK_NOT_GRANTED", NT_STATUS_LOCK_NOT_GRANTED}, {"NT_STATUS_DELETE_PENDING", NT_STATUS_DELETE_PENDING}, {"NT_STATUS_CTL_FILE_NOT_SUPPORTED", NT_STATUS_CTL_FILE_NOT_SUPPORTED}, {"NT_STATUS_UNKNOWN_REVISION", NT_STATUS_UNKNOWN_REVISION}, {"NT_STATUS_REVISION_MISMATCH", NT_STATUS_REVISION_MISMATCH}, {"NT_STATUS_INVALID_OWNER", NT_STATUS_INVALID_OWNER}, {"NT_STATUS_INVALID_PRIMARY_GROUP", NT_STATUS_INVALID_PRIMARY_GROUP}, {"NT_STATUS_NO_IMPERSONATION_TOKEN", NT_STATUS_NO_IMPERSONATION_TOKEN}, {"NT_STATUS_CANT_DISABLE_MANDATORY", NT_STATUS_CANT_DISABLE_MANDATORY}, {"NT_STATUS_NO_LOGON_SERVERS", NT_STATUS_NO_LOGON_SERVERS}, {"NT_STATUS_NO_SUCH_LOGON_SESSION", NT_STATUS_NO_SUCH_LOGON_SESSION}, {"NT_STATUS_NO_SUCH_PRIVILEGE", NT_STATUS_NO_SUCH_PRIVILEGE}, {"NT_STATUS_PRIVILEGE_NOT_HELD", NT_STATUS_PRIVILEGE_NOT_HELD}, {"NT_STATUS_INVALID_ACCOUNT_NAME", NT_STATUS_INVALID_ACCOUNT_NAME}, {"NT_STATUS_USER_EXISTS", NT_STATUS_USER_EXISTS}, {"NT_STATUS_NO_SUCH_USER", NT_STATUS_NO_SUCH_USER}, {"NT_STATUS_GROUP_EXISTS", NT_STATUS_GROUP_EXISTS}, {"NT_STATUS_NO_SUCH_GROUP", NT_STATUS_NO_SUCH_GROUP}, {"NT_STATUS_MEMBER_IN_GROUP", NT_STATUS_MEMBER_IN_GROUP}, {"NT_STATUS_MEMBER_NOT_IN_GROUP", NT_STATUS_MEMBER_NOT_IN_GROUP}, {"NT_STATUS_LAST_ADMIN", NT_STATUS_LAST_ADMIN}, {"NT_STATUS_WRONG_PASSWORD", NT_STATUS_WRONG_PASSWORD}, {"NT_STATUS_ILL_FORMED_PASSWORD", NT_STATUS_ILL_FORMED_PASSWORD}, {"NT_STATUS_PASSWORD_RESTRICTION", NT_STATUS_PASSWORD_RESTRICTION}, {"NT_STATUS_LOGON_FAILURE", NT_STATUS_LOGON_FAILURE}, {"NT_STATUS_ACCOUNT_RESTRICTION", NT_STATUS_ACCOUNT_RESTRICTION}, 
{"NT_STATUS_INVALID_LOGON_HOURS", NT_STATUS_INVALID_LOGON_HOURS}, {"NT_STATUS_INVALID_WORKSTATION", NT_STATUS_INVALID_WORKSTATION}, {"NT_STATUS_PASSWORD_EXPIRED", NT_STATUS_PASSWORD_EXPIRED}, {"NT_STATUS_ACCOUNT_DISABLED", NT_STATUS_ACCOUNT_DISABLED}, {"NT_STATUS_NONE_MAPPED", NT_STATUS_NONE_MAPPED}, {"NT_STATUS_TOO_MANY_LUIDS_REQUESTED", NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, {"NT_STATUS_LUIDS_EXHAUSTED", NT_STATUS_LUIDS_EXHAUSTED}, {"NT_STATUS_INVALID_SUB_AUTHORITY", NT_STATUS_INVALID_SUB_AUTHORITY}, {"NT_STATUS_INVALID_ACL", NT_STATUS_INVALID_ACL}, {"NT_STATUS_INVALID_SID", NT_STATUS_INVALID_SID}, {"NT_STATUS_INVALID_SECURITY_DESCR", NT_STATUS_INVALID_SECURITY_DESCR}, {"NT_STATUS_PROCEDURE_NOT_FOUND", NT_STATUS_PROCEDURE_NOT_FOUND}, {"NT_STATUS_INVALID_IMAGE_FORMAT", NT_STATUS_INVALID_IMAGE_FORMAT}, {"NT_STATUS_NO_TOKEN", NT_STATUS_NO_TOKEN}, {"NT_STATUS_BAD_INHERITANCE_ACL", NT_STATUS_BAD_INHERITANCE_ACL}, {"NT_STATUS_RANGE_NOT_LOCKED", NT_STATUS_RANGE_NOT_LOCKED}, {"NT_STATUS_DISK_FULL", NT_STATUS_DISK_FULL}, {"NT_STATUS_SERVER_DISABLED", NT_STATUS_SERVER_DISABLED}, {"NT_STATUS_SERVER_NOT_DISABLED", NT_STATUS_SERVER_NOT_DISABLED}, {"NT_STATUS_TOO_MANY_GUIDS_REQUESTED", NT_STATUS_TOO_MANY_GUIDS_REQUESTED}, {"NT_STATUS_GUIDS_EXHAUSTED", NT_STATUS_GUIDS_EXHAUSTED}, {"NT_STATUS_INVALID_ID_AUTHORITY", NT_STATUS_INVALID_ID_AUTHORITY}, {"NT_STATUS_AGENTS_EXHAUSTED", NT_STATUS_AGENTS_EXHAUSTED}, {"NT_STATUS_INVALID_VOLUME_LABEL", NT_STATUS_INVALID_VOLUME_LABEL}, {"NT_STATUS_SECTION_NOT_EXTENDED", NT_STATUS_SECTION_NOT_EXTENDED}, {"NT_STATUS_NOT_MAPPED_DATA", NT_STATUS_NOT_MAPPED_DATA}, {"NT_STATUS_RESOURCE_DATA_NOT_FOUND", NT_STATUS_RESOURCE_DATA_NOT_FOUND}, {"NT_STATUS_RESOURCE_TYPE_NOT_FOUND", NT_STATUS_RESOURCE_TYPE_NOT_FOUND}, {"NT_STATUS_RESOURCE_NAME_NOT_FOUND", NT_STATUS_RESOURCE_NAME_NOT_FOUND}, {"NT_STATUS_ARRAY_BOUNDS_EXCEEDED", NT_STATUS_ARRAY_BOUNDS_EXCEEDED}, {"NT_STATUS_FLOAT_DENORMAL_OPERAND", NT_STATUS_FLOAT_DENORMAL_OPERAND}, 
{"NT_STATUS_FLOAT_DIVIDE_BY_ZERO", NT_STATUS_FLOAT_DIVIDE_BY_ZERO}, {"NT_STATUS_FLOAT_INEXACT_RESULT", NT_STATUS_FLOAT_INEXACT_RESULT}, {"NT_STATUS_FLOAT_INVALID_OPERATION", NT_STATUS_FLOAT_INVALID_OPERATION}, {"NT_STATUS_FLOAT_OVERFLOW", NT_STATUS_FLOAT_OVERFLOW}, {"NT_STATUS_FLOAT_STACK_CHECK", NT_STATUS_FLOAT_STACK_CHECK}, {"NT_STATUS_FLOAT_UNDERFLOW", NT_STATUS_FLOAT_UNDERFLOW}, {"NT_STATUS_INTEGER_DIVIDE_BY_ZERO", NT_STATUS_INTEGER_DIVIDE_BY_ZERO}, {"NT_STATUS_INTEGER_OVERFLOW", NT_STATUS_INTEGER_OVERFLOW}, {"NT_STATUS_PRIVILEGED_INSTRUCTION", NT_STATUS_PRIVILEGED_INSTRUCTION}, {"NT_STATUS_TOO_MANY_PAGING_FILES", NT_STATUS_TOO_MANY_PAGING_FILES}, {"NT_STATUS_FILE_INVALID", NT_STATUS_FILE_INVALID}, {"NT_STATUS_ALLOTTED_SPACE_EXCEEDED", NT_STATUS_ALLOTTED_SPACE_EXCEEDED}, {"NT_STATUS_INSUFFICIENT_RESOURCES", NT_STATUS_INSUFFICIENT_RESOURCES}, {"NT_STATUS_DFS_EXIT_PATH_FOUND", NT_STATUS_DFS_EXIT_PATH_FOUND}, {"NT_STATUS_DEVICE_DATA_ERROR", NT_STATUS_DEVICE_DATA_ERROR}, {"NT_STATUS_DEVICE_NOT_CONNECTED", NT_STATUS_DEVICE_NOT_CONNECTED}, {"NT_STATUS_DEVICE_POWER_FAILURE", NT_STATUS_DEVICE_POWER_FAILURE}, {"NT_STATUS_FREE_VM_NOT_AT_BASE", NT_STATUS_FREE_VM_NOT_AT_BASE}, {"NT_STATUS_MEMORY_NOT_ALLOCATED", NT_STATUS_MEMORY_NOT_ALLOCATED}, {"NT_STATUS_WORKING_SET_QUOTA", NT_STATUS_WORKING_SET_QUOTA}, {"NT_STATUS_MEDIA_WRITE_PROTECTED", NT_STATUS_MEDIA_WRITE_PROTECTED}, {"NT_STATUS_DEVICE_NOT_READY", NT_STATUS_DEVICE_NOT_READY}, {"NT_STATUS_INVALID_GROUP_ATTRIBUTES", NT_STATUS_INVALID_GROUP_ATTRIBUTES}, {"NT_STATUS_BAD_IMPERSONATION_LEVEL", NT_STATUS_BAD_IMPERSONATION_LEVEL}, {"NT_STATUS_CANT_OPEN_ANONYMOUS", NT_STATUS_CANT_OPEN_ANONYMOUS}, {"NT_STATUS_BAD_VALIDATION_CLASS", NT_STATUS_BAD_VALIDATION_CLASS}, {"NT_STATUS_BAD_TOKEN_TYPE", NT_STATUS_BAD_TOKEN_TYPE}, {"NT_STATUS_BAD_MASTER_BOOT_RECORD", NT_STATUS_BAD_MASTER_BOOT_RECORD}, {"NT_STATUS_INSTRUCTION_MISALIGNMENT", NT_STATUS_INSTRUCTION_MISALIGNMENT}, {"NT_STATUS_INSTANCE_NOT_AVAILABLE", 
NT_STATUS_INSTANCE_NOT_AVAILABLE}, {"NT_STATUS_PIPE_NOT_AVAILABLE", NT_STATUS_PIPE_NOT_AVAILABLE}, {"NT_STATUS_INVALID_PIPE_STATE", NT_STATUS_INVALID_PIPE_STATE}, {"NT_STATUS_PIPE_BUSY", NT_STATUS_PIPE_BUSY}, {"NT_STATUS_ILLEGAL_FUNCTION", NT_STATUS_ILLEGAL_FUNCTION}, {"NT_STATUS_PIPE_DISCONNECTED", NT_STATUS_PIPE_DISCONNECTED}, {"NT_STATUS_PIPE_CLOSING", NT_STATUS_PIPE_CLOSING}, {"NT_STATUS_PIPE_CONNECTED", NT_STATUS_PIPE_CONNECTED}, {"NT_STATUS_PIPE_LISTENING", NT_STATUS_PIPE_LISTENING}, {"NT_STATUS_INVALID_READ_MODE", NT_STATUS_INVALID_READ_MODE}, {"NT_STATUS_IO_TIMEOUT", NT_STATUS_IO_TIMEOUT}, {"NT_STATUS_FILE_FORCED_CLOSED", NT_STATUS_FILE_FORCED_CLOSED}, {"NT_STATUS_PROFILING_NOT_STARTED", NT_STATUS_PROFILING_NOT_STARTED}, {"NT_STATUS_PROFILING_NOT_STOPPED", NT_STATUS_PROFILING_NOT_STOPPED}, {"NT_STATUS_COULD_NOT_INTERPRET", NT_STATUS_COULD_NOT_INTERPRET}, {"NT_STATUS_FILE_IS_A_DIRECTORY", NT_STATUS_FILE_IS_A_DIRECTORY}, {"NT_STATUS_NOT_SUPPORTED", NT_STATUS_NOT_SUPPORTED}, {"NT_STATUS_REMOTE_NOT_LISTENING", NT_STATUS_REMOTE_NOT_LISTENING}, {"NT_STATUS_DUPLICATE_NAME", NT_STATUS_DUPLICATE_NAME}, {"NT_STATUS_BAD_NETWORK_PATH", NT_STATUS_BAD_NETWORK_PATH}, {"NT_STATUS_NETWORK_BUSY", NT_STATUS_NETWORK_BUSY}, {"NT_STATUS_DEVICE_DOES_NOT_EXIST", NT_STATUS_DEVICE_DOES_NOT_EXIST}, {"NT_STATUS_TOO_MANY_COMMANDS", NT_STATUS_TOO_MANY_COMMANDS}, {"NT_STATUS_ADAPTER_HARDWARE_ERROR", NT_STATUS_ADAPTER_HARDWARE_ERROR}, {"NT_STATUS_INVALID_NETWORK_RESPONSE", NT_STATUS_INVALID_NETWORK_RESPONSE}, {"NT_STATUS_UNEXPECTED_NETWORK_ERROR", NT_STATUS_UNEXPECTED_NETWORK_ERROR}, {"NT_STATUS_BAD_REMOTE_ADAPTER", NT_STATUS_BAD_REMOTE_ADAPTER}, {"NT_STATUS_PRINT_QUEUE_FULL", NT_STATUS_PRINT_QUEUE_FULL}, {"NT_STATUS_NO_SPOOL_SPACE", NT_STATUS_NO_SPOOL_SPACE}, {"NT_STATUS_PRINT_CANCELLED", NT_STATUS_PRINT_CANCELLED}, {"NT_STATUS_NETWORK_NAME_DELETED", NT_STATUS_NETWORK_NAME_DELETED}, {"NT_STATUS_NETWORK_ACCESS_DENIED", NT_STATUS_NETWORK_ACCESS_DENIED}, {"NT_STATUS_BAD_DEVICE_TYPE", 
NT_STATUS_BAD_DEVICE_TYPE}, {"NT_STATUS_BAD_NETWORK_NAME", NT_STATUS_BAD_NETWORK_NAME}, {"NT_STATUS_TOO_MANY_NAMES", NT_STATUS_TOO_MANY_NAMES}, {"NT_STATUS_TOO_MANY_SESSIONS", NT_STATUS_TOO_MANY_SESSIONS}, {"NT_STATUS_SHARING_PAUSED", NT_STATUS_SHARING_PAUSED}, {"NT_STATUS_REQUEST_NOT_ACCEPTED", NT_STATUS_REQUEST_NOT_ACCEPTED}, {"NT_STATUS_REDIRECTOR_PAUSED", NT_STATUS_REDIRECTOR_PAUSED}, {"NT_STATUS_NET_WRITE_FAULT", NT_STATUS_NET_WRITE_FAULT}, {"NT_STATUS_PROFILING_AT_LIMIT", NT_STATUS_PROFILING_AT_LIMIT}, {"NT_STATUS_NOT_SAME_DEVICE", NT_STATUS_NOT_SAME_DEVICE}, {"NT_STATUS_FILE_RENAMED", NT_STATUS_FILE_RENAMED}, {"NT_STATUS_VIRTUAL_CIRCUIT_CLOSED", NT_STATUS_VIRTUAL_CIRCUIT_CLOSED}, {"NT_STATUS_NO_SECURITY_ON_OBJECT", NT_STATUS_NO_SECURITY_ON_OBJECT}, {"NT_STATUS_CANT_WAIT", NT_STATUS_CANT_WAIT}, {"NT_STATUS_PIPE_EMPTY", NT_STATUS_PIPE_EMPTY}, {"NT_STATUS_CANT_ACCESS_DOMAIN_INFO", NT_STATUS_CANT_ACCESS_DOMAIN_INFO}, {"NT_STATUS_CANT_TERMINATE_SELF", NT_STATUS_CANT_TERMINATE_SELF}, {"NT_STATUS_INVALID_SERVER_STATE", NT_STATUS_INVALID_SERVER_STATE}, {"NT_STATUS_INVALID_DOMAIN_STATE", NT_STATUS_INVALID_DOMAIN_STATE}, {"NT_STATUS_INVALID_DOMAIN_ROLE", NT_STATUS_INVALID_DOMAIN_ROLE}, {"NT_STATUS_NO_SUCH_DOMAIN", NT_STATUS_NO_SUCH_DOMAIN}, {"NT_STATUS_DOMAIN_EXISTS", NT_STATUS_DOMAIN_EXISTS}, {"NT_STATUS_DOMAIN_LIMIT_EXCEEDED", NT_STATUS_DOMAIN_LIMIT_EXCEEDED}, {"NT_STATUS_OPLOCK_NOT_GRANTED", NT_STATUS_OPLOCK_NOT_GRANTED}, {"NT_STATUS_INVALID_OPLOCK_PROTOCOL", NT_STATUS_INVALID_OPLOCK_PROTOCOL}, {"NT_STATUS_INTERNAL_DB_CORRUPTION", NT_STATUS_INTERNAL_DB_CORRUPTION}, {"NT_STATUS_INTERNAL_ERROR", NT_STATUS_INTERNAL_ERROR}, {"NT_STATUS_GENERIC_NOT_MAPPED", NT_STATUS_GENERIC_NOT_MAPPED}, {"NT_STATUS_BAD_DESCRIPTOR_FORMAT", NT_STATUS_BAD_DESCRIPTOR_FORMAT}, {"NT_STATUS_INVALID_USER_BUFFER", NT_STATUS_INVALID_USER_BUFFER}, {"NT_STATUS_UNEXPECTED_IO_ERROR", NT_STATUS_UNEXPECTED_IO_ERROR}, {"NT_STATUS_UNEXPECTED_MM_CREATE_ERR", NT_STATUS_UNEXPECTED_MM_CREATE_ERR}, 
{"NT_STATUS_UNEXPECTED_MM_MAP_ERROR", NT_STATUS_UNEXPECTED_MM_MAP_ERROR}, {"NT_STATUS_UNEXPECTED_MM_EXTEND_ERR", NT_STATUS_UNEXPECTED_MM_EXTEND_ERR}, {"NT_STATUS_NOT_LOGON_PROCESS", NT_STATUS_NOT_LOGON_PROCESS}, {"NT_STATUS_LOGON_SESSION_EXISTS", NT_STATUS_LOGON_SESSION_EXISTS}, {"NT_STATUS_INVALID_PARAMETER_1", NT_STATUS_INVALID_PARAMETER_1}, {"NT_STATUS_INVALID_PARAMETER_2", NT_STATUS_INVALID_PARAMETER_2}, {"NT_STATUS_INVALID_PARAMETER_3", NT_STATUS_INVALID_PARAMETER_3}, {"NT_STATUS_INVALID_PARAMETER_4", NT_STATUS_INVALID_PARAMETER_4}, {"NT_STATUS_INVALID_PARAMETER_5", NT_STATUS_INVALID_PARAMETER_5}, {"NT_STATUS_INVALID_PARAMETER_6", NT_STATUS_INVALID_PARAMETER_6}, {"NT_STATUS_INVALID_PARAMETER_7", NT_STATUS_INVALID_PARAMETER_7}, {"NT_STATUS_INVALID_PARAMETER_8", NT_STATUS_INVALID_PARAMETER_8}, {"NT_STATUS_INVALID_PARAMETER_9", NT_STATUS_INVALID_PARAMETER_9}, {"NT_STATUS_INVALID_PARAMETER_10", NT_STATUS_INVALID_PARAMETER_10}, {"NT_STATUS_INVALID_PARAMETER_11", NT_STATUS_INVALID_PARAMETER_11}, {"NT_STATUS_INVALID_PARAMETER_12", NT_STATUS_INVALID_PARAMETER_12}, {"NT_STATUS_REDIRECTOR_NOT_STARTED", NT_STATUS_REDIRECTOR_NOT_STARTED}, {"NT_STATUS_REDIRECTOR_STARTED", NT_STATUS_REDIRECTOR_STARTED}, {"NT_STATUS_STACK_OVERFLOW", NT_STATUS_STACK_OVERFLOW}, {"NT_STATUS_NO_SUCH_PACKAGE", NT_STATUS_NO_SUCH_PACKAGE}, {"NT_STATUS_BAD_FUNCTION_TABLE", NT_STATUS_BAD_FUNCTION_TABLE}, {"NT_STATUS_DIRECTORY_NOT_EMPTY", NT_STATUS_DIRECTORY_NOT_EMPTY}, {"NT_STATUS_FILE_CORRUPT_ERROR", NT_STATUS_FILE_CORRUPT_ERROR}, {"NT_STATUS_NOT_A_DIRECTORY", NT_STATUS_NOT_A_DIRECTORY}, {"NT_STATUS_BAD_LOGON_SESSION_STATE", NT_STATUS_BAD_LOGON_SESSION_STATE}, {"NT_STATUS_LOGON_SESSION_COLLISION", NT_STATUS_LOGON_SESSION_COLLISION}, {"NT_STATUS_NAME_TOO_LONG", NT_STATUS_NAME_TOO_LONG}, {"NT_STATUS_FILES_OPEN", NT_STATUS_FILES_OPEN}, {"NT_STATUS_CONNECTION_IN_USE", NT_STATUS_CONNECTION_IN_USE}, {"NT_STATUS_MESSAGE_NOT_FOUND", NT_STATUS_MESSAGE_NOT_FOUND}, {"NT_STATUS_PROCESS_IS_TERMINATING", 
NT_STATUS_PROCESS_IS_TERMINATING}, {"NT_STATUS_INVALID_LOGON_TYPE", NT_STATUS_INVALID_LOGON_TYPE}, {"NT_STATUS_NO_GUID_TRANSLATION", NT_STATUS_NO_GUID_TRANSLATION}, {"NT_STATUS_CANNOT_IMPERSONATE", NT_STATUS_CANNOT_IMPERSONATE}, {"NT_STATUS_IMAGE_ALREADY_LOADED", NT_STATUS_IMAGE_ALREADY_LOADED}, {"NT_STATUS_ABIOS_NOT_PRESENT", NT_STATUS_ABIOS_NOT_PRESENT}, {"NT_STATUS_ABIOS_LID_NOT_EXIST", NT_STATUS_ABIOS_LID_NOT_EXIST}, {"NT_STATUS_ABIOS_LID_ALREADY_OWNED", NT_STATUS_ABIOS_LID_ALREADY_OWNED}, {"NT_STATUS_ABIOS_NOT_LID_OWNER", NT_STATUS_ABIOS_NOT_LID_OWNER}, {"NT_STATUS_ABIOS_INVALID_COMMAND", NT_STATUS_ABIOS_INVALID_COMMAND}, {"NT_STATUS_ABIOS_INVALID_LID", NT_STATUS_ABIOS_INVALID_LID}, {"NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE", NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE}, {"NT_STATUS_ABIOS_INVALID_SELECTOR", NT_STATUS_ABIOS_INVALID_SELECTOR}, {"NT_STATUS_NO_LDT", NT_STATUS_NO_LDT}, {"NT_STATUS_INVALID_LDT_SIZE", NT_STATUS_INVALID_LDT_SIZE}, {"NT_STATUS_INVALID_LDT_OFFSET", NT_STATUS_INVALID_LDT_OFFSET}, {"NT_STATUS_INVALID_LDT_DESCRIPTOR", NT_STATUS_INVALID_LDT_DESCRIPTOR}, {"NT_STATUS_INVALID_IMAGE_NE_FORMAT", NT_STATUS_INVALID_IMAGE_NE_FORMAT}, {"NT_STATUS_RXACT_INVALID_STATE", NT_STATUS_RXACT_INVALID_STATE}, {"NT_STATUS_RXACT_COMMIT_FAILURE", NT_STATUS_RXACT_COMMIT_FAILURE}, {"NT_STATUS_MAPPED_FILE_SIZE_ZERO", NT_STATUS_MAPPED_FILE_SIZE_ZERO}, {"NT_STATUS_TOO_MANY_OPENED_FILES", NT_STATUS_TOO_MANY_OPENED_FILES}, {"NT_STATUS_CANCELLED", NT_STATUS_CANCELLED}, {"NT_STATUS_CANNOT_DELETE", NT_STATUS_CANNOT_DELETE}, {"NT_STATUS_INVALID_COMPUTER_NAME", NT_STATUS_INVALID_COMPUTER_NAME}, {"NT_STATUS_FILE_DELETED", NT_STATUS_FILE_DELETED}, {"NT_STATUS_SPECIAL_ACCOUNT", NT_STATUS_SPECIAL_ACCOUNT}, {"NT_STATUS_SPECIAL_GROUP", NT_STATUS_SPECIAL_GROUP}, {"NT_STATUS_SPECIAL_USER", NT_STATUS_SPECIAL_USER}, {"NT_STATUS_MEMBERS_PRIMARY_GROUP", NT_STATUS_MEMBERS_PRIMARY_GROUP}, {"NT_STATUS_FILE_CLOSED", NT_STATUS_FILE_CLOSED}, {"NT_STATUS_TOO_MANY_THREADS", 
NT_STATUS_TOO_MANY_THREADS}, {"NT_STATUS_THREAD_NOT_IN_PROCESS", NT_STATUS_THREAD_NOT_IN_PROCESS}, {"NT_STATUS_TOKEN_ALREADY_IN_USE", NT_STATUS_TOKEN_ALREADY_IN_USE}, {"NT_STATUS_PAGEFILE_QUOTA_EXCEEDED", NT_STATUS_PAGEFILE_QUOTA_EXCEEDED}, {"NT_STATUS_COMMITMENT_LIMIT", NT_STATUS_COMMITMENT_LIMIT}, {"NT_STATUS_INVALID_IMAGE_LE_FORMAT", NT_STATUS_INVALID_IMAGE_LE_FORMAT}, {"NT_STATUS_INVALID_IMAGE_NOT_MZ", NT_STATUS_INVALID_IMAGE_NOT_MZ}, {"NT_STATUS_INVALID_IMAGE_PROTECT", NT_STATUS_INVALID_IMAGE_PROTECT}, {"NT_STATUS_INVALID_IMAGE_WIN_16", NT_STATUS_INVALID_IMAGE_WIN_16}, {"NT_STATUS_LOGON_SERVER_CONFLICT", NT_STATUS_LOGON_SERVER_CONFLICT}, {"NT_STATUS_TIME_DIFFERENCE_AT_DC", NT_STATUS_TIME_DIFFERENCE_AT_DC}, {"NT_STATUS_SYNCHRONIZATION_REQUIRED", NT_STATUS_SYNCHRONIZATION_REQUIRED}, {"NT_STATUS_DLL_NOT_FOUND", NT_STATUS_DLL_NOT_FOUND}, {"NT_STATUS_OPEN_FAILED", NT_STATUS_OPEN_FAILED}, {"NT_STATUS_IO_PRIVILEGE_FAILED", NT_STATUS_IO_PRIVILEGE_FAILED}, {"NT_STATUS_ORDINAL_NOT_FOUND", NT_STATUS_ORDINAL_NOT_FOUND}, {"NT_STATUS_ENTRYPOINT_NOT_FOUND", NT_STATUS_ENTRYPOINT_NOT_FOUND}, {"NT_STATUS_CONTROL_C_EXIT", NT_STATUS_CONTROL_C_EXIT}, {"NT_STATUS_LOCAL_DISCONNECT", NT_STATUS_LOCAL_DISCONNECT}, {"NT_STATUS_REMOTE_DISCONNECT", NT_STATUS_REMOTE_DISCONNECT}, {"NT_STATUS_REMOTE_RESOURCES", NT_STATUS_REMOTE_RESOURCES}, {"NT_STATUS_LINK_FAILED", NT_STATUS_LINK_FAILED}, {"NT_STATUS_LINK_TIMEOUT", NT_STATUS_LINK_TIMEOUT}, {"NT_STATUS_INVALID_CONNECTION", NT_STATUS_INVALID_CONNECTION}, {"NT_STATUS_INVALID_ADDRESS", NT_STATUS_INVALID_ADDRESS}, {"NT_STATUS_DLL_INIT_FAILED", NT_STATUS_DLL_INIT_FAILED}, {"NT_STATUS_MISSING_SYSTEMFILE", NT_STATUS_MISSING_SYSTEMFILE}, {"NT_STATUS_UNHANDLED_EXCEPTION", NT_STATUS_UNHANDLED_EXCEPTION}, {"NT_STATUS_APP_INIT_FAILURE", NT_STATUS_APP_INIT_FAILURE}, {"NT_STATUS_PAGEFILE_CREATE_FAILED", NT_STATUS_PAGEFILE_CREATE_FAILED}, {"NT_STATUS_NO_PAGEFILE", NT_STATUS_NO_PAGEFILE}, {"NT_STATUS_INVALID_LEVEL", NT_STATUS_INVALID_LEVEL}, 
{"NT_STATUS_WRONG_PASSWORD_CORE", NT_STATUS_WRONG_PASSWORD_CORE}, {"NT_STATUS_ILLEGAL_FLOAT_CONTEXT", NT_STATUS_ILLEGAL_FLOAT_CONTEXT}, {"NT_STATUS_PIPE_BROKEN", NT_STATUS_PIPE_BROKEN}, {"NT_STATUS_REGISTRY_CORRUPT", NT_STATUS_REGISTRY_CORRUPT}, {"NT_STATUS_REGISTRY_IO_FAILED", NT_STATUS_REGISTRY_IO_FAILED}, {"NT_STATUS_NO_EVENT_PAIR", NT_STATUS_NO_EVENT_PAIR}, {"NT_STATUS_UNRECOGNIZED_VOLUME", NT_STATUS_UNRECOGNIZED_VOLUME}, {"NT_STATUS_SERIAL_NO_DEVICE_INITED", NT_STATUS_SERIAL_NO_DEVICE_INITED}, {"NT_STATUS_NO_SUCH_ALIAS", NT_STATUS_NO_SUCH_ALIAS}, {"NT_STATUS_MEMBER_NOT_IN_ALIAS", NT_STATUS_MEMBER_NOT_IN_ALIAS}, {"NT_STATUS_MEMBER_IN_ALIAS", NT_STATUS_MEMBER_IN_ALIAS}, {"NT_STATUS_ALIAS_EXISTS", NT_STATUS_ALIAS_EXISTS}, {"NT_STATUS_LOGON_NOT_GRANTED", NT_STATUS_LOGON_NOT_GRANTED}, {"NT_STATUS_TOO_MANY_SECRETS", NT_STATUS_TOO_MANY_SECRETS}, {"NT_STATUS_SECRET_TOO_LONG", NT_STATUS_SECRET_TOO_LONG}, {"NT_STATUS_INTERNAL_DB_ERROR", NT_STATUS_INTERNAL_DB_ERROR}, {"NT_STATUS_FULLSCREEN_MODE", NT_STATUS_FULLSCREEN_MODE}, {"NT_STATUS_TOO_MANY_CONTEXT_IDS", NT_STATUS_TOO_MANY_CONTEXT_IDS}, {"NT_STATUS_LOGON_TYPE_NOT_GRANTED", NT_STATUS_LOGON_TYPE_NOT_GRANTED}, {"NT_STATUS_NOT_REGISTRY_FILE", NT_STATUS_NOT_REGISTRY_FILE}, {"NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED", NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED}, {"NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR", NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR}, {"NT_STATUS_FT_MISSING_MEMBER", NT_STATUS_FT_MISSING_MEMBER}, {"NT_STATUS_ILL_FORMED_SERVICE_ENTRY", NT_STATUS_ILL_FORMED_SERVICE_ENTRY}, {"NT_STATUS_ILLEGAL_CHARACTER", NT_STATUS_ILLEGAL_CHARACTER}, {"NT_STATUS_UNMAPPABLE_CHARACTER", NT_STATUS_UNMAPPABLE_CHARACTER}, {"NT_STATUS_UNDEFINED_CHARACTER", NT_STATUS_UNDEFINED_CHARACTER}, {"NT_STATUS_FLOPPY_VOLUME", NT_STATUS_FLOPPY_VOLUME}, {"NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND", NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND}, {"NT_STATUS_FLOPPY_WRONG_CYLINDER", NT_STATUS_FLOPPY_WRONG_CYLINDER}, {"NT_STATUS_FLOPPY_UNKNOWN_ERROR", NT_STATUS_FLOPPY_UNKNOWN_ERROR}, 
{"NT_STATUS_FLOPPY_BAD_REGISTERS", NT_STATUS_FLOPPY_BAD_REGISTERS}, {"NT_STATUS_DISK_RECALIBRATE_FAILED", NT_STATUS_DISK_RECALIBRATE_FAILED}, {"NT_STATUS_DISK_OPERATION_FAILED", NT_STATUS_DISK_OPERATION_FAILED}, {"NT_STATUS_DISK_RESET_FAILED", NT_STATUS_DISK_RESET_FAILED}, {"NT_STATUS_SHARED_IRQ_BUSY", NT_STATUS_SHARED_IRQ_BUSY}, {"NT_STATUS_FT_ORPHANING", NT_STATUS_FT_ORPHANING}, {"NT_STATUS_PARTITION_FAILURE", NT_STATUS_PARTITION_FAILURE}, {"NT_STATUS_INVALID_BLOCK_LENGTH", NT_STATUS_INVALID_BLOCK_LENGTH}, {"NT_STATUS_DEVICE_NOT_PARTITIONED", NT_STATUS_DEVICE_NOT_PARTITIONED}, {"NT_STATUS_UNABLE_TO_LOCK_MEDIA", NT_STATUS_UNABLE_TO_LOCK_MEDIA}, {"NT_STATUS_UNABLE_TO_UNLOAD_MEDIA", NT_STATUS_UNABLE_TO_UNLOAD_MEDIA}, {"NT_STATUS_EOM_OVERFLOW", NT_STATUS_EOM_OVERFLOW}, {"NT_STATUS_NO_MEDIA", NT_STATUS_NO_MEDIA}, {"NT_STATUS_NO_SUCH_MEMBER", NT_STATUS_NO_SUCH_MEMBER}, {"NT_STATUS_INVALID_MEMBER", NT_STATUS_INVALID_MEMBER}, {"NT_STATUS_KEY_DELETED", NT_STATUS_KEY_DELETED}, {"NT_STATUS_NO_LOG_SPACE", NT_STATUS_NO_LOG_SPACE}, {"NT_STATUS_TOO_MANY_SIDS", NT_STATUS_TOO_MANY_SIDS}, {"NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED", NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED}, {"NT_STATUS_KEY_HAS_CHILDREN", NT_STATUS_KEY_HAS_CHILDREN}, {"NT_STATUS_CHILD_MUST_BE_VOLATILE", NT_STATUS_CHILD_MUST_BE_VOLATILE}, {"NT_STATUS_DEVICE_CONFIGURATION_ERROR", NT_STATUS_DEVICE_CONFIGURATION_ERROR}, {"NT_STATUS_DRIVER_INTERNAL_ERROR", NT_STATUS_DRIVER_INTERNAL_ERROR}, {"NT_STATUS_INVALID_DEVICE_STATE", NT_STATUS_INVALID_DEVICE_STATE}, {"NT_STATUS_IO_DEVICE_ERROR", NT_STATUS_IO_DEVICE_ERROR}, {"NT_STATUS_DEVICE_PROTOCOL_ERROR", NT_STATUS_DEVICE_PROTOCOL_ERROR}, {"NT_STATUS_BACKUP_CONTROLLER", NT_STATUS_BACKUP_CONTROLLER}, {"NT_STATUS_LOG_FILE_FULL", NT_STATUS_LOG_FILE_FULL}, {"NT_STATUS_TOO_LATE", NT_STATUS_TOO_LATE}, {"NT_STATUS_NO_TRUST_LSA_SECRET", NT_STATUS_NO_TRUST_LSA_SECRET}, {"NT_STATUS_NO_TRUST_SAM_ACCOUNT", NT_STATUS_NO_TRUST_SAM_ACCOUNT}, {"NT_STATUS_TRUSTED_DOMAIN_FAILURE", 
NT_STATUS_TRUSTED_DOMAIN_FAILURE}, {"NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE", NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE}, {"NT_STATUS_EVENTLOG_FILE_CORRUPT", NT_STATUS_EVENTLOG_FILE_CORRUPT}, {"NT_STATUS_EVENTLOG_CANT_START", NT_STATUS_EVENTLOG_CANT_START}, {"NT_STATUS_TRUST_FAILURE", NT_STATUS_TRUST_FAILURE}, {"NT_STATUS_MUTANT_LIMIT_EXCEEDED", NT_STATUS_MUTANT_LIMIT_EXCEEDED}, {"NT_STATUS_NETLOGON_NOT_STARTED", NT_STATUS_NETLOGON_NOT_STARTED}, {"NT_STATUS_ACCOUNT_EXPIRED", NT_STATUS_ACCOUNT_EXPIRED}, {"NT_STATUS_POSSIBLE_DEADLOCK", NT_STATUS_POSSIBLE_DEADLOCK}, {"NT_STATUS_NETWORK_CREDENTIAL_CONFLICT", NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, {"NT_STATUS_REMOTE_SESSION_LIMIT", NT_STATUS_REMOTE_SESSION_LIMIT}, {"NT_STATUS_EVENTLOG_FILE_CHANGED", NT_STATUS_EVENTLOG_FILE_CHANGED}, {"NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT", NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT}, {"NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT", NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, {"NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT", NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT}, {"NT_STATUS_DOMAIN_TRUST_INCONSISTENT", NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {"NT_STATUS_FS_DRIVER_REQUIRED", NT_STATUS_FS_DRIVER_REQUIRED}, {"NT_STATUS_NO_USER_SESSION_KEY", NT_STATUS_NO_USER_SESSION_KEY}, {"NT_STATUS_USER_SESSION_DELETED", NT_STATUS_USER_SESSION_DELETED}, {"NT_STATUS_RESOURCE_LANG_NOT_FOUND", NT_STATUS_RESOURCE_LANG_NOT_FOUND}, {"NT_STATUS_INSUFF_SERVER_RESOURCES", NT_STATUS_INSUFF_SERVER_RESOURCES}, {"NT_STATUS_INVALID_BUFFER_SIZE", NT_STATUS_INVALID_BUFFER_SIZE}, {"NT_STATUS_INVALID_ADDRESS_COMPONENT", NT_STATUS_INVALID_ADDRESS_COMPONENT}, {"NT_STATUS_INVALID_ADDRESS_WILDCARD", NT_STATUS_INVALID_ADDRESS_WILDCARD}, {"NT_STATUS_TOO_MANY_ADDRESSES", NT_STATUS_TOO_MANY_ADDRESSES}, {"NT_STATUS_ADDRESS_ALREADY_EXISTS", NT_STATUS_ADDRESS_ALREADY_EXISTS}, {"NT_STATUS_ADDRESS_CLOSED", NT_STATUS_ADDRESS_CLOSED}, {"NT_STATUS_CONNECTION_DISCONNECTED", NT_STATUS_CONNECTION_DISCONNECTED}, {"NT_STATUS_CONNECTION_RESET", 
NT_STATUS_CONNECTION_RESET}, {"NT_STATUS_TOO_MANY_NODES", NT_STATUS_TOO_MANY_NODES}, {"NT_STATUS_TRANSACTION_ABORTED", NT_STATUS_TRANSACTION_ABORTED}, {"NT_STATUS_TRANSACTION_TIMED_OUT", NT_STATUS_TRANSACTION_TIMED_OUT}, {"NT_STATUS_TRANSACTION_NO_RELEASE", NT_STATUS_TRANSACTION_NO_RELEASE}, {"NT_STATUS_TRANSACTION_NO_MATCH", NT_STATUS_TRANSACTION_NO_MATCH}, {"NT_STATUS_TRANSACTION_RESPONDED", NT_STATUS_TRANSACTION_RESPONDED}, {"NT_STATUS_TRANSACTION_INVALID_ID", NT_STATUS_TRANSACTION_INVALID_ID}, {"NT_STATUS_TRANSACTION_INVALID_TYPE", NT_STATUS_TRANSACTION_INVALID_TYPE}, {"NT_STATUS_NOT_SERVER_SESSION", NT_STATUS_NOT_SERVER_SESSION}, {"NT_STATUS_NOT_CLIENT_SESSION", NT_STATUS_NOT_CLIENT_SESSION}, {"NT_STATUS_CANNOT_LOAD_REGISTRY_FILE", NT_STATUS_CANNOT_LOAD_REGISTRY_FILE}, {"NT_STATUS_DEBUG_ATTACH_FAILED", NT_STATUS_DEBUG_ATTACH_FAILED}, {"NT_STATUS_SYSTEM_PROCESS_TERMINATED", NT_STATUS_SYSTEM_PROCESS_TERMINATED}, {"NT_STATUS_DATA_NOT_ACCEPTED", NT_STATUS_DATA_NOT_ACCEPTED}, {"NT_STATUS_NO_BROWSER_SERVERS_FOUND", NT_STATUS_NO_BROWSER_SERVERS_FOUND}, {"NT_STATUS_VDM_HARD_ERROR", NT_STATUS_VDM_HARD_ERROR}, {"NT_STATUS_DRIVER_CANCEL_TIMEOUT", NT_STATUS_DRIVER_CANCEL_TIMEOUT}, {"NT_STATUS_REPLY_MESSAGE_MISMATCH", NT_STATUS_REPLY_MESSAGE_MISMATCH}, {"NT_STATUS_MAPPED_ALIGNMENT", NT_STATUS_MAPPED_ALIGNMENT}, {"NT_STATUS_IMAGE_CHECKSUM_MISMATCH", NT_STATUS_IMAGE_CHECKSUM_MISMATCH}, {"NT_STATUS_LOST_WRITEBEHIND_DATA", NT_STATUS_LOST_WRITEBEHIND_DATA}, {"NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID", NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID}, {"NT_STATUS_PASSWORD_MUST_CHANGE", NT_STATUS_PASSWORD_MUST_CHANGE}, {"NT_STATUS_NOT_FOUND", NT_STATUS_NOT_FOUND}, {"NT_STATUS_NOT_TINY_STREAM", NT_STATUS_NOT_TINY_STREAM}, {"NT_STATUS_RECOVERY_FAILURE", NT_STATUS_RECOVERY_FAILURE}, {"NT_STATUS_STACK_OVERFLOW_READ", NT_STATUS_STACK_OVERFLOW_READ}, {"NT_STATUS_FAIL_CHECK", NT_STATUS_FAIL_CHECK}, {"NT_STATUS_DUPLICATE_OBJECTID", NT_STATUS_DUPLICATE_OBJECTID}, 
{"NT_STATUS_OBJECTID_EXISTS", NT_STATUS_OBJECTID_EXISTS}, {"NT_STATUS_CONVERT_TO_LARGE", NT_STATUS_CONVERT_TO_LARGE}, {"NT_STATUS_RETRY", NT_STATUS_RETRY}, {"NT_STATUS_FOUND_OUT_OF_SCOPE", NT_STATUS_FOUND_OUT_OF_SCOPE}, {"NT_STATUS_ALLOCATE_BUCKET", NT_STATUS_ALLOCATE_BUCKET}, {"NT_STATUS_PROPSET_NOT_FOUND", NT_STATUS_PROPSET_NOT_FOUND}, {"NT_STATUS_MARSHALL_OVERFLOW", NT_STATUS_MARSHALL_OVERFLOW}, {"NT_STATUS_INVALID_VARIANT", NT_STATUS_INVALID_VARIANT}, {"NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND", NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND}, {"NT_STATUS_ACCOUNT_LOCKED_OUT", NT_STATUS_ACCOUNT_LOCKED_OUT}, {"NT_STATUS_HANDLE_NOT_CLOSABLE", NT_STATUS_HANDLE_NOT_CLOSABLE}, {"NT_STATUS_CONNECTION_REFUSED", NT_STATUS_CONNECTION_REFUSED}, {"NT_STATUS_GRACEFUL_DISCONNECT", NT_STATUS_GRACEFUL_DISCONNECT}, {"NT_STATUS_ADDRESS_ALREADY_ASSOCIATED", NT_STATUS_ADDRESS_ALREADY_ASSOCIATED}, {"NT_STATUS_ADDRESS_NOT_ASSOCIATED", NT_STATUS_ADDRESS_NOT_ASSOCIATED}, {"NT_STATUS_CONNECTION_INVALID", NT_STATUS_CONNECTION_INVALID}, {"NT_STATUS_CONNECTION_ACTIVE", NT_STATUS_CONNECTION_ACTIVE}, {"NT_STATUS_NETWORK_UNREACHABLE", NT_STATUS_NETWORK_UNREACHABLE}, {"NT_STATUS_HOST_UNREACHABLE", NT_STATUS_HOST_UNREACHABLE}, {"NT_STATUS_PROTOCOL_UNREACHABLE", NT_STATUS_PROTOCOL_UNREACHABLE}, {"NT_STATUS_PORT_UNREACHABLE", NT_STATUS_PORT_UNREACHABLE}, {"NT_STATUS_REQUEST_ABORTED", NT_STATUS_REQUEST_ABORTED}, {"NT_STATUS_CONNECTION_ABORTED", NT_STATUS_CONNECTION_ABORTED}, {"NT_STATUS_BAD_COMPRESSION_BUFFER", NT_STATUS_BAD_COMPRESSION_BUFFER}, {"NT_STATUS_USER_MAPPED_FILE", NT_STATUS_USER_MAPPED_FILE}, {"NT_STATUS_AUDIT_FAILED", NT_STATUS_AUDIT_FAILED}, {"NT_STATUS_TIMER_RESOLUTION_NOT_SET", NT_STATUS_TIMER_RESOLUTION_NOT_SET}, {"NT_STATUS_CONNECTION_COUNT_LIMIT", NT_STATUS_CONNECTION_COUNT_LIMIT}, {"NT_STATUS_LOGIN_TIME_RESTRICTION", NT_STATUS_LOGIN_TIME_RESTRICTION}, {"NT_STATUS_LOGIN_WKSTA_RESTRICTION", NT_STATUS_LOGIN_WKSTA_RESTRICTION}, {"NT_STATUS_IMAGE_MP_UP_MISMATCH", 
NT_STATUS_IMAGE_MP_UP_MISMATCH}, {"NT_STATUS_INSUFFICIENT_LOGON_INFO", NT_STATUS_INSUFFICIENT_LOGON_INFO}, {"NT_STATUS_BAD_DLL_ENTRYPOINT", NT_STATUS_BAD_DLL_ENTRYPOINT}, {"NT_STATUS_BAD_SERVICE_ENTRYPOINT", NT_STATUS_BAD_SERVICE_ENTRYPOINT}, {"NT_STATUS_LPC_REPLY_LOST", NT_STATUS_LPC_REPLY_LOST}, {"NT_STATUS_IP_ADDRESS_CONFLICT1", NT_STATUS_IP_ADDRESS_CONFLICT1}, {"NT_STATUS_IP_ADDRESS_CONFLICT2", NT_STATUS_IP_ADDRESS_CONFLICT2}, {"NT_STATUS_REGISTRY_QUOTA_LIMIT", NT_STATUS_REGISTRY_QUOTA_LIMIT}, {"NT_STATUS_PATH_NOT_COVERED", NT_STATUS_PATH_NOT_COVERED}, {"NT_STATUS_NO_CALLBACK_ACTIVE", NT_STATUS_NO_CALLBACK_ACTIVE}, {"NT_STATUS_LICENSE_QUOTA_EXCEEDED", NT_STATUS_LICENSE_QUOTA_EXCEEDED}, {"NT_STATUS_PWD_TOO_SHORT", NT_STATUS_PWD_TOO_SHORT}, {"NT_STATUS_PWD_TOO_RECENT", NT_STATUS_PWD_TOO_RECENT}, {"NT_STATUS_PWD_HISTORY_CONFLICT", NT_STATUS_PWD_HISTORY_CONFLICT}, {"NT_STATUS_PLUGPLAY_NO_DEVICE", NT_STATUS_PLUGPLAY_NO_DEVICE}, {"NT_STATUS_UNSUPPORTED_COMPRESSION", NT_STATUS_UNSUPPORTED_COMPRESSION}, {"NT_STATUS_INVALID_HW_PROFILE", NT_STATUS_INVALID_HW_PROFILE}, {"NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH", NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH}, {"NT_STATUS_DRIVER_ORDINAL_NOT_FOUND", NT_STATUS_DRIVER_ORDINAL_NOT_FOUND}, {"NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND", NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND}, {"NT_STATUS_RESOURCE_NOT_OWNED", NT_STATUS_RESOURCE_NOT_OWNED}, {"NT_STATUS_TOO_MANY_LINKS", NT_STATUS_TOO_MANY_LINKS}, {"NT_STATUS_QUOTA_LIST_INCONSISTENT", NT_STATUS_QUOTA_LIST_INCONSISTENT}, {"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE}, {"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES}, {"STATUS_MORE_ENTRIES", STATUS_MORE_ENTRIES}, {"STATUS_SOME_UNMAPPED", STATUS_SOME_UNMAPPED}, {NULL, 0} };
gpl-2.0
djcapelis/linux-kernel-opensparc-fpga
drivers/video/backlight/jornada720_bl.c
160
3989
/* * * Backlight driver for HP Jornada 700 series (710/720/728) * Copyright (C) 2006-2009 Kristoffer Ericson <kristoffer.ericson@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 or any later version as published by the Free Software Foundation. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/backlight.h> #include <linux/device.h> #include <linux/fb.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <mach/jornada720.h> #include <mach/hardware.h> #include <video/s1d13xxxfb.h> #define BL_MAX_BRIGHT 255 #define BL_DEF_BRIGHT 25 static int jornada_bl_get_brightness(struct backlight_device *bd) { int ret; /* check if backlight is on */ if (!(PPSR & PPC_LDD1)) return 0; jornada_ssp_start(); /* cmd should return txdummy */ ret = jornada_ssp_byte(GETBRIGHTNESS); if (jornada_ssp_byte(GETBRIGHTNESS) != TXDUMMY) { pr_err("get brightness timeout\n"); jornada_ssp_end(); return -ETIMEDOUT; } else /* exchange txdummy for value */ ret = jornada_ssp_byte(TXDUMMY); jornada_ssp_end(); return BL_MAX_BRIGHT - ret; } static int jornada_bl_update_status(struct backlight_device *bd) { int ret = 0; jornada_ssp_start(); /* If backlight is off then really turn it off */ if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) { ret = jornada_ssp_byte(BRIGHTNESSOFF); if (ret != TXDUMMY) { pr_info("brightness off timeout\n"); /* turn off backlight */ PPSR &= ~PPC_LDD1; PPDR |= PPC_LDD1; ret = -ETIMEDOUT; } } else /* turn on backlight */ PPSR |= PPC_LDD1; /* send command to our mcu */ if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) { pr_info("failed to set brightness\n"); ret = -ETIMEDOUT; goto out; } /* * at this point we expect that the mcu has accepted * our command and is waiting for our new value * please note that maximum brightness is 255, * but due to physical layout it is equal to 0, so we 
simply * invert the value (MAX VALUE - NEW VALUE). */ if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) { pr_err("set brightness failed\n"); ret = -ETIMEDOUT; } /* * If infact we get an TXDUMMY as output we are happy and dont * make any further comments about it */ out: jornada_ssp_end(); return ret; } static const struct backlight_ops jornada_bl_ops = { .get_brightness = jornada_bl_get_brightness, .update_status = jornada_bl_update_status, .options = BL_CORE_SUSPENDRESUME, }; static int jornada_bl_probe(struct platform_device *pdev) { struct backlight_properties props; int ret; struct backlight_device *bd; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = BL_MAX_BRIGHT; bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL, &jornada_bl_ops, &props); if (IS_ERR(bd)) { ret = PTR_ERR(bd); pr_err("failed to register device, err=%x\n", ret); return ret; } bd->props.power = FB_BLANK_UNBLANK; bd->props.brightness = BL_DEF_BRIGHT; /* * note. make sure max brightness is set otherwise * you will get seemingly non-related errors when * trying to change brightness */ jornada_bl_update_status(bd); platform_set_drvdata(pdev, bd); pr_info("HP Jornada 700 series backlight driver\n"); return 0; } static int jornada_bl_remove(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); backlight_device_unregister(bd); return 0; } static struct platform_driver jornada_bl_driver = { .probe = jornada_bl_probe, .remove = jornada_bl_remove, .driver = { .name = "jornada_bl", }, }; module_platform_driver(jornada_bl_driver); MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson>"); MODULE_DESCRIPTION("HP Jornada 710/720/728 Backlight driver"); MODULE_LICENSE("GPL");
gpl-2.0
lce67/android_kernel_htc_inc
sound/oss/midibuf.c
160
8826
/*
 * sound/oss/midibuf.c
 *
 * Device file manager for /dev/midi#
 */
/*
 * Copyright (C) by Hannu Savolainen 1993-1997
 *
 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
 * Version 2 (June 1991). See the "COPYING" file distributed with this software
 * for more info.
 */
/*
 * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
 */
#include <linux/stddef.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#define MIDIBUF_C
#include "sound_config.h"

/*
 * Don't make MAX_QUEUE_SIZE larger than 4000
 */

#define MAX_QUEUE_SIZE	4000

/* Writers and closers sleep on midi_sleeper; readers sleep on input_sleeper. */
static wait_queue_head_t midi_sleeper[MAX_MIDI_DEV];
static wait_queue_head_t input_sleeper[MAX_MIDI_DEV];

/* Fixed-size circular byte queue; head/tail wrap modulo MAX_QUEUE_SIZE. */
struct midi_buf
{
	int len, head, tail;
	unsigned char queue[MAX_QUEUE_SIZE];
};

struct midi_parms
{
	long prech_timeout;	/*
				 * Timeout before the first ch
				 */
};

static struct midi_buf *midi_out_buf[MAX_MIDI_DEV] = {NULL};
static struct midi_buf *midi_in_buf[MAX_MIDI_DEV] = {NULL};
static struct midi_parms parms[MAX_MIDI_DEV];

static void midi_poll(unsigned long dummy);

/* One shared timer drives output for ALL open midi devices (see midi_poll). */
static DEFINE_TIMER(poll_timer, midi_poll, 0, 0);

static volatile int open_devs;
/* Single lock protecting every queue's len/head/tail on all devices. */
static DEFINE_SPINLOCK(lock);

#define DATA_AVAIL(q) (q->len)
#define SPACE_AVAIL(q) (MAX_QUEUE_SIZE - q->len)

/* Append one byte under the lock; silently drops the byte when the queue is full. */
#define QUEUE_BYTE(q, data) \
	if (SPACE_AVAIL(q)) \
	{ \
	  unsigned long flags; \
	  spin_lock_irqsave(&lock, flags); \
	  q->queue[q->tail] = (data); \
	  q->len++; q->tail = (q->tail+1) % MAX_QUEUE_SIZE; \
	  spin_unlock_irqrestore(&lock, flags); \
	}

/* Pop one byte into 'data' under the lock; no-op when the queue is empty. */
#define REMOVE_BYTE(q, data) \
	if (DATA_AVAIL(q)) \
	{ \
	  unsigned long flags; \
	  spin_lock_irqsave(&lock, flags); \
	  data = q->queue[q->head]; \
	  q->len--; q->head = (q->head+1) % MAX_QUEUE_SIZE; \
	  spin_unlock_irqrestore(&lock, flags); \
	}

/* Busy-wait (in HZ/10 naps) until the low-level driver reports empty buffers. */
static void drain_midi_queue(int dev)
{

	/*
	 * Give the Midi driver time to drain its output queues
	 */

	if (midi_devs[dev]->buffer_status != NULL)
		while (!signal_pending(current) && midi_devs[dev]->buffer_status(dev))
			interruptible_sleep_on_timeout(&midi_sleeper[dev], HZ/10);
}

/* Interrupt-context receive hook: enqueue the byte and wake any reader. */
static void midi_input_intr(int dev, unsigned char data)
{
	if (midi_in_buf[dev] == NULL)
		return;

	if (data == 0xfe)	/*
				 * Active sensing
				 */
		return;	/*
			 * Ignore
			 */

	if (SPACE_AVAIL(midi_in_buf[dev])) {
		QUEUE_BYTE(midi_in_buf[dev], data);
		wake_up(&input_sleeper[dev]);
	}
}

static void midi_output_intr(int dev)
{
	/*
	 * Currently NOP
	 */
}

/*
 * Timer callback: pump queued output bytes to each open device's outputc()
 * and re-arm itself one jiffy later for as long as any device is open.
 * The lock is dropped around outputc() so interrupts/readers can make
 * progress during slow hardware writes.
 */
static void midi_poll(unsigned long dummy)
{
	unsigned long flags;
	int dev;

	spin_lock_irqsave(&lock, flags);
	if (open_devs)
	{
		for (dev = 0; dev < num_midis; dev++)
			if (midi_devs[dev] != NULL && midi_out_buf[dev] != NULL)
			{
				while (DATA_AVAIL(midi_out_buf[dev]))
				{
					int ok;
					int c = midi_out_buf[dev]->queue[midi_out_buf[dev]->head];

					spin_unlock_irqrestore(&lock,flags);/* Give some time to others */
					ok = midi_devs[dev]->outputc(dev, c);
					spin_lock_irqsave(&lock, flags);
					if (!ok)
						break;
					midi_out_buf[dev]->head = (midi_out_buf[dev]->head + 1) % MAX_QUEUE_SIZE;
					midi_out_buf[dev]->len--;
				}

				if (DATA_AVAIL(midi_out_buf[dev]) < 100)
					wake_up(&midi_sleeper[dev]);
			}
		poll_timer.expires = (1) + jiffies;
		add_timer(&poll_timer);
		/*
		 * Come back later
		 */
	}
	spin_unlock_irqrestore(&lock, flags);
}

/*
 * Open /dev/midiN: allocate in/out queues, open the low-level driver and
 * start the shared poll timer on the first open.  Returns 0 or a negative
 * errno from the driver / allocation failure.
 */
int MIDIbuf_open(int dev, struct file *file)
{
	int mode, err;

	dev = dev >> 4;
	mode = translate_mode(file);

	if (num_midis > MAX_MIDI_DEV)
	{
		printk(KERN_ERR "midi: Too many midi interfaces\n");
		num_midis = MAX_MIDI_DEV;
	}
	if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL)
		return -ENXIO;
	/*
	 * Interrupts disabled. Be careful
	 */

	/*
	 * NOTE(review): dropping a module reference on *open* looks inverted
	 * (one would expect try_module_get here); presumably this balances a
	 * reference taken by the sound core before dispatching - confirm
	 * against sound_core before touching it.
	 */
	module_put(midi_devs[dev]->owner);

	if ((err = midi_devs[dev]->open(dev, mode,
				 midi_input_intr, midi_output_intr)) < 0)
		return err;

	parms[dev].prech_timeout = MAX_SCHEDULE_TIMEOUT;
	midi_in_buf[dev] = vmalloc(sizeof(struct midi_buf));

	if (midi_in_buf[dev] == NULL)
	{
		printk(KERN_WARNING "midi: Can't allocate buffer\n");
		midi_devs[dev]->close(dev);
		return -EIO;
	}
	midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0;

	midi_out_buf[dev] = vmalloc(sizeof(struct midi_buf));

	if (midi_out_buf[dev] == NULL)
	{
		printk(KERN_WARNING "midi: Can't allocate buffer\n");
		midi_devs[dev]->close(dev);
		vfree(midi_in_buf[dev]);
		midi_in_buf[dev] = NULL;
		return -EIO;
	}
	midi_out_buf[dev]->len = midi_out_buf[dev]->head = midi_out_buf[dev]->tail = 0;
	open_devs++;

	init_waitqueue_head(&midi_sleeper[dev]);
	init_waitqueue_head(&input_sleeper[dev]);

	if (open_devs < 2)	/* This was first open */
	{
		poll_timer.expires = 1 + jiffies;
		add_timer(&poll_timer);	/* Start polling */
	}
	return err;
}

/*
 * Close /dev/midiN: flush pending output (unless opened read-only), stop
 * the poll timer on last close, free both queues and release the driver.
 */
void MIDIbuf_release(int dev, struct file *file)
{
	int mode;

	dev = dev >> 4;
	mode = translate_mode(file);

	if (dev < 0 || dev >= num_midis || midi_devs[dev] == NULL)
		return;

	/*
	 * Wait until the queue is empty
	 */

	if (mode != OPEN_READ)
	{
		midi_devs[dev]->outputc(dev, 0xfe);	/*
							 * Active sensing to shut the
							 * devices
							 */

		while (!signal_pending(current) && DATA_AVAIL(midi_out_buf[dev]))
			interruptible_sleep_on(&midi_sleeper[dev]);
		/*
		 * Sync
		 */

		drain_midi_queue(dev);	/*
					 * Ensure the output queues are empty
					 */
	}

	midi_devs[dev]->close(dev);

	open_devs--;
	if (open_devs == 0)
		del_timer_sync(&poll_timer);
	vfree(midi_in_buf[dev]);
	vfree(midi_out_buf[dev]);
	midi_in_buf[dev] = NULL;
	midi_out_buf[dev] = NULL;

	module_put(midi_devs[dev]->owner);
}

/*
 * Write bytes from userspace into the output queue, sleeping when the
 * queue is full (unless O_NONBLOCK).  Returns bytes queued or -errno.
 */
int MIDIbuf_write(int dev, struct file *file, const char __user *buf, int count)
{
	int c, n, i;
	unsigned char tmp_data;

	dev = dev >> 4;

	if (!count)
		return 0;

	c = 0;

	while (c < count)
	{
		n = SPACE_AVAIL(midi_out_buf[dev]);

		if (n == 0)	/*
				 * No space just now.
				 */
		{
			if (file->f_flags & O_NONBLOCK) {
				c = -EAGAIN;
				goto out;
			}

			interruptible_sleep_on(&midi_sleeper[dev]);
			if (signal_pending(current))
			{
				c = -EINTR;
				goto out;
			}
			n = SPACE_AVAIL(midi_out_buf[dev]);
		}
		if (n > (count - c))
			n = count - c;

		for (i = 0; i < n; i++)
		{
			/* BROKE BROKE BROKE - CANT DO THIS WITH CLI !! */
			/* yes, think the same, so I removed the cli() brackets
			QUEUE_BYTE is protected against interrupts */
			if (copy_from_user((char *) &tmp_data, &(buf)[c], 1)) {
				c = -EFAULT;
				goto out;
			}
			QUEUE_BYTE(midi_out_buf[dev], tmp_data);
			c++;
		}
	}
out:
	return c;
}

/*
 * Read up to 'count' bytes from the input queue; waits up to prech_timeout
 * for the first byte when the queue is empty (unless O_NONBLOCK).
 * Returns bytes copied, 0 on timeout, or -errno.
 */
int MIDIbuf_read(int dev, struct file *file, char __user *buf, int count)
{
	int n, c = 0;
	unsigned char tmp_data;

	dev = dev >> 4;

	if (!DATA_AVAIL(midi_in_buf[dev]))	/*
						 * No data yet, wait
						 */
	{
		if (file->f_flags & O_NONBLOCK) {
			c = -EAGAIN;
			goto out;
		}
		interruptible_sleep_on_timeout(&input_sleeper[dev],
					       parms[dev].prech_timeout);

		if (signal_pending(current))
			c = -EINTR;	/* The user is getting restless */
	}
	if (c == 0 && DATA_AVAIL(midi_in_buf[dev]))	/*
							 * Got some bytes
							 */
	{
		n = DATA_AVAIL(midi_in_buf[dev]);
		if (n > count)
			n = count;
		c = 0;

		while (c < n)
		{
			char *fixit;
			REMOVE_BYTE(midi_in_buf[dev], tmp_data);
			fixit = (char *) &tmp_data;
			/* BROKE BROKE BROKE */
			/* yes removed the cli() brackets again
			   should q->len,tail&head be atomic_t? */
			if (copy_to_user(&(buf)[c], fixit, 1)) {
				c = -EFAULT;
				goto out;
			}
			c++;
		}
	}
out:
	return c;
}

/*
 * ioctl dispatcher: 'C'-class commands go to the coprocessor (if any),
 * SNDCTL_MIDI_PRETIME sets the first-byte read timeout (tenths of a
 * second, converted to jiffies), everything else is forwarded to the
 * low-level driver.
 */
int MIDIbuf_ioctl(int dev, struct file *file,
		  unsigned int cmd, void __user *arg)
{
	int val;

	dev = dev >> 4;

	if (((cmd >> 8) & 0xff) == 'C')
	{
		if (midi_devs[dev]->coproc)	/* Coprocessor ioctl */
			return midi_devs[dev]->coproc->ioctl(midi_devs[dev]->coproc->devc, cmd, arg, 0);
/*		printk("/dev/midi%d: No coprocessor for this device\n", dev);*/
		return -ENXIO;
	}
	else
	{
		switch (cmd)
		{
			case SNDCTL_MIDI_PRETIME:
				if (get_user(val, (int __user *)arg))
					return -EFAULT;
				if (val < 0)
					val = 0;
				val = (HZ * val) / 10;
				parms[dev].prech_timeout = val;
				return put_user(val, (int __user *)arg);

			default:
				if (!midi_devs[dev]->ioctl)
					return -EINVAL;
				return midi_devs[dev]->ioctl(dev, cmd, arg);
		}
	}
}

/* No kernel lock - fine */
unsigned int MIDIbuf_poll(int dev, struct file *file, poll_table * wait)
{
	unsigned int mask = 0;

	dev = dev >> 4;

	/* input */
	poll_wait(file, &input_sleeper[dev], wait);
	if (DATA_AVAIL(midi_in_buf[dev]))
		mask |= POLLIN | POLLRDNORM;

	/* output */
	poll_wait(file, &midi_sleeper[dev], wait);
	if (!SPACE_AVAIL(midi_out_buf[dev]))
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}

/* Number of input bytes currently queued for device 'dev' (0 if not open). */
int MIDIbuf_avail(int dev)
{
	if (midi_in_buf[dev])
		return DATA_AVAIL (midi_in_buf[dev]);
	return 0;
}
EXPORT_SYMBOL(MIDIbuf_avail);
gpl-2.0
targetnull/nkvm
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
160
9296
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "atom.h"

#include <linux/slab.h>
#include <linux/acpi.h>
/*
 * BIOS.
 */

/* If you boot an IGP board with a discrete card as the primary,
 * the IGP rom is not accessible via the rom bar as the IGP rom is
 * part of the system bios.  On boot, the system bios puts a
 * copy of the igp rom at the start of vram if a discrete card is
 * present.
 */
static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
{
	uint8_t __iomem *bios;
	resource_size_t vram_base;
	resource_size_t size = 256 * 1024; /* ??? */

	/* discrete cards must be posted before their VRAM copy is valid */
	if (!(adev->flags & AMD_IS_APU))
		if (!amdgpu_card_posted(adev))
			return false;

	adev->bios = NULL;
	vram_base = pci_resource_start(adev->pdev, 0);
	bios = ioremap(vram_base, size);
	if (!bios) {
		return false;
	}

	/* every legal VBIOS image starts with the 0x55 0xaa ROM signature */
	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
		iounmap(bios);
		return false;
	}
	adev->bios = kmalloc(size, GFP_KERNEL);
	if (adev->bios == NULL) {
		iounmap(bios);
		return false;
	}
	memcpy_fromio(adev->bios, bios, size);
	iounmap(bios);
	return true;
}

/* Read the VBIOS through the PCI expansion-ROM BAR. */
bool amdgpu_read_bios(struct amdgpu_device *adev)
{
	uint8_t __iomem *bios, val1, val2;
	size_t size;

	adev->bios = NULL;
	/* XXX: some cards may return 0 for rom size? ddx has a workaround */
	bios = pci_map_rom(adev->pdev, &size);
	if (!bios) {
		return false;
	}

	val1 = readb(&bios[0]);
	val2 = readb(&bios[1]);

	if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
		pci_unmap_rom(adev->pdev, bios);
		return false;
	}
	adev->bios = kzalloc(size, GFP_KERNEL);
	if (adev->bios == NULL) {
		pci_unmap_rom(adev->pdev, bios);
		return false;
	}
	memcpy_fromio(adev->bios, bios, size);
	pci_unmap_rom(adev->pdev, bios);
	return true;
}

/* Read the VBIOS from the platform (firmware-provided) ROM image. */
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
	uint8_t __iomem *bios;
	size_t size;

	adev->bios = NULL;

	bios = pci_platform_rom(adev->pdev, &size);
	if (!bios) {
		return false;
	}

	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
		return false;
	}
	adev->bios = kmemdup(bios, size, GFP_KERNEL);
	if (adev->bios == NULL) {
		return false;
	}

	return true;
}

#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
 * dual-gpu systems.
 */
/* retrieve the ROM in 4k blocks */
#define ATRM_BIOS_PAGE 4096
/**
 * amdgpu_atrm_call - fetch a chunk of the vbios
 *
 * @atrm_handle: acpi ATRM handle
 * @bios: vbios image pointer
 * @offset: offset of vbios image data to fetch
 * @len: length of vbios image data to fetch
 *
 * Executes ATRM to fetch a chunk of the discrete
 * vbios image on PX systems (all asics).
 * Returns the length of the buffer fetched.
 */
static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
			    int offset, int len)
{
	acpi_status status;
	union acpi_object atrm_arg_elements[2], *obj;
	struct acpi_object_list atrm_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};

	atrm_arg.count = 2;
	atrm_arg.pointer = &atrm_arg_elements[0];

	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atrm_arg_elements[0].integer.value = offset;

	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
	atrm_arg_elements[1].integer.value = len;

	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
	if (ACPI_FAILURE(status)) {
		printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
		return -ENODEV;
	}

	obj = (union acpi_object *)buffer.pointer;
	/*
	 * NOTE(review): this trusts firmware to return at most 'len' bytes;
	 * a buggy ATRM could overflow 'bios'.  Confirm against the ATRM spec
	 * before adding a clamp, since callers size the buffer to 256 KiB.
	 */
	memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
	len = obj->buffer.length;
	kfree(buffer.pointer);
	return len;
}

/*
 * Locate an ATRM method on any VGA/display PCI device and use it to pull
 * the discrete card's VBIOS page by page.  Returns true when a valid
 * signed image ends up in adev->bios.
 */
static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
{
	int ret;
	int size = 256 * 1024;
	int i;
	struct pci_dev *pdev = NULL;
	acpi_handle dhandle, atrm_handle;
	acpi_status status;
	bool found = false;

	/* ATRM is for the discrete card only */
	if (adev->flags & AMD_IS_APU)
		return false;

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		dhandle = ACPI_HANDLE(&pdev->dev);
		if (!dhandle)
			continue;

		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
		if (!ACPI_FAILURE(status)) {
			found = true;
			break;
		}
	}

	if (!found) {
		while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
			dhandle = ACPI_HANDLE(&pdev->dev);
			if (!dhandle)
				continue;

			status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
			if (!ACPI_FAILURE(status)) {
				found = true;
				break;
			}
		}
	}

	if (!found)
		return false;

	adev->bios = kmalloc(size, GFP_KERNEL);
	if (!adev->bios) {
		DRM_ERROR("Unable to allocate bios\n");
		return false;
	}

	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
		ret = amdgpu_atrm_call(atrm_handle,
				       adev->bios,
				       (i * ATRM_BIOS_PAGE),
				       ATRM_BIOS_PAGE);
		if (ret < ATRM_BIOS_PAGE)
			break;
	}

	if (i == 0 || adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
		kfree(adev->bios);
		/*
		 * BUGFIX: clear the pointer after freeing it.  The fallback
		 * readers tried next by amdgpu_get_bios() do not all reset
		 * adev->bios on their own failure paths, so leaving it
		 * dangling here risks a use-after-free / double-free later.
		 */
		adev->bios = NULL;
		return false;
	}
	return true;
}
#else
static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
{
	return false;
}
#endif

/* Ask the ASIC-specific backend for the BIOS of a disabled GPU (APUs use VRAM). */
static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return igp_read_bios_from_vram(adev);
	else
		return amdgpu_asic_read_disabled_bios(adev);
}

#ifdef CONFIG_ACPI
/*
 * Fetch the VBIOS embedded in the ACPI VFCT table (UEFI systems), after
 * validating the table bounds and that the image matches this PCI device.
 */
static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
{
	bool ret = false;
	struct acpi_table_header *hdr;
	acpi_size tbl_size;
	UEFI_ACPI_VFCT *vfct;
	GOP_VBIOS_CONTENT *vbios;
	VFCT_IMAGE_HEADER *vhdr;

	if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
		return false;
	if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
		DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
		goto out_unmap;
	}

	vfct = (UEFI_ACPI_VFCT *)hdr;
	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
		DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
		goto out_unmap;
	}

	vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
	vhdr = &vbios->VbiosHeader;
	DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
		 vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
		 vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);

	if (vhdr->PCIBus != adev->pdev->bus->number ||
	    vhdr->PCIDevice != PCI_SLOT(adev->pdev->devfn) ||
	    vhdr->PCIFunction != PCI_FUNC(adev->pdev->devfn) ||
	    vhdr->VendorID != adev->pdev->vendor ||
	    vhdr->DeviceID != adev->pdev->device) {
		DRM_INFO("ACPI VFCT table is not for this card\n");
		goto out_unmap;
	}

	if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
		DRM_ERROR("ACPI VFCT image truncated\n");
		goto out_unmap;
	}

	adev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
	ret = !!adev->bios;

out_unmap:
	/* NOTE(review): label name is historical - nothing is mapped here. */
	return ret;
}
#else
static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
{
	return false;
}
#endif

/*
 * Try every known VBIOS source in priority order (ATRM, VFCT, VRAM copy,
 * ROM BAR, disabled-GPU path, platform ROM), then sanity-check the image
 * (ROM signature, x86 marker, ATOM header).  Returns true with adev->bios
 * allocated on success; on failure adev->bios is left NULL.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev)
{
	bool r;
	uint16_t tmp;

	r = amdgpu_atrm_get_bios(adev);
	if (r == false)
		r = amdgpu_acpi_vfct_bios(adev);
	if (r == false)
		r = igp_read_bios_from_vram(adev);
	if (r == false)
		r = amdgpu_read_bios(adev);
	if (r == false) {
		r = amdgpu_read_disabled_bios(adev);
	}
	if (r == false) {
		r = amdgpu_read_platform_bios(adev);
	}
	if (r == false || adev->bios == NULL) {
		DRM_ERROR("Unable to locate a BIOS ROM\n");
		adev->bios = NULL;
		return false;
	}
	if (adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
		printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]);
		goto free_bios;
	}

	tmp = RBIOS16(0x18);
	if (RBIOS8(tmp + 0x14) != 0x0) {
		DRM_INFO("Not an x86 BIOS ROM, not using.\n");
		goto free_bios;
	}

	adev->bios_header_start = RBIOS16(0x48);
	if (!adev->bios_header_start) {
		goto free_bios;
	}
	tmp = adev->bios_header_start + 4;
	if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
	    !memcmp(adev->bios + tmp, "MOTA", 4)) {
		adev->is_atom_bios = true;
	} else {
		adev->is_atom_bios = false;
	}

	DRM_DEBUG("%sBIOS detected\n", adev->is_atom_bios ? "ATOM" : "COM");
	return true;
free_bios:
	kfree(adev->bios);
	adev->bios = NULL;
	return false;
}
gpl-2.0
pgielda/linux-renesas
sound/soc/samsung/idma.c
160
10145
/*
 * sound/soc/samsung/idma.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * I2S0's Internal DMA driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "i2s.h"
#include "idma.h"
#include "dma.h"
#include "i2s-regs.h"

#define ST_RUNNING		(1<<0)
#define ST_OPENED		(1<<1)

/* Capabilities advertised to ALSA for the internal DMA playback path. */
static const struct snd_pcm_hardware idma_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		    SNDRV_PCM_INFO_BLOCK_TRANSFER |
		    SNDRV_PCM_INFO_MMAP |
		    SNDRV_PCM_INFO_MMAP_VALID |
		    SNDRV_PCM_INFO_PAUSE |
		    SNDRV_PCM_INFO_RESUME,
	.formats = SNDRV_PCM_FMTBIT_S16_LE |
		    SNDRV_PCM_FMTBIT_U16_LE |
		    SNDRV_PCM_FMTBIT_S24_LE |
		    SNDRV_PCM_FMTBIT_U24_LE |
		    SNDRV_PCM_FMTBIT_U8 |
		    SNDRV_PCM_FMTBIT_S8,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = MAX_IDMA_BUFFER,
	.period_bytes_min = 128,
	.period_bytes_max = MAX_IDMA_PERIOD,
	.periods_min = 1,
	.periods_max = 2,
};

/* Per-substream runtime state; addresses are bus addresses into the LP buffer. */
struct idma_ctrl {
	spinlock_t	lock;
	int		state;
	dma_addr_t	start;
	dma_addr_t	pos;
	dma_addr_t	end;
	dma_addr_t	period;
	dma_addr_t	periodsz;
	void		*token;
	void		(*cb)(void *dt, int bytes_xfer);
};

/* Singleton: register base and low-power TX buffer address for I2S0's IDMA. */
static struct idma_info {
	spinlock_t	lock;
	void		 __iomem  *regs;
	dma_addr_t	lp_tx_addr;
} idma;

/* Current hardware read position: transfer counter is in 32-bit words. */
static void idma_getpos(dma_addr_t *src)
{
	*src = idma.lp_tx_addr +
		(readl(idma.regs + I2STRNCNT) & 0xffffff) * 4;
}

/*
 * Program the IDMA registers for a new playback run: level-0 interrupt
 * address (end of first period), start address, transfer size, and enable
 * the level-0 interrupt.
 */
static int idma_enqueue(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 val;

	spin_lock(&prtd->lock);
	prtd->token = (void *) substream;
	spin_unlock(&prtd->lock);

	/* Internal DMA Level0 Interrupt Address */
	val = idma.lp_tx_addr + prtd->periodsz;
	writel(val, idma.regs + I2SLVL0ADDR);

	/* Start address0 of I2S internal DMA operation. */
	val = idma.lp_tx_addr;
	writel(val, idma.regs + I2SSTR0);

	/*
	 * Transfer block size for I2S internal DMA.
	 * Should decide transfer size before start dma operation
	 */
	val = readl(idma.regs + I2SSIZE);
	val &= ~(I2SSIZE_TRNMSK << I2SSIZE_SHIFT);
	val |= (((runtime->dma_bytes >> 2) &
			I2SSIZE_TRNMSK) << I2SSIZE_SHIFT);
	writel(val, idma.regs + I2SSIZE);

	val = readl(idma.regs + I2SAHB);
	val |= AHB_INTENLVL0;
	writel(val, idma.regs + I2SAHB);

	return 0;
}

/* Install the period-elapsed callback invoked from the IRQ handler. */
static void idma_setcallbk(struct snd_pcm_substream *substream,
				void (*cb)(void *, int))
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	spin_lock(&prtd->lock);
	prtd->cb = cb;
	spin_unlock(&prtd->lock);
}

/* Start/stop the AHB DMA engine and its level-0 interrupt. */
static void idma_control(int op)
{
	/*
	 * NOTE(review): I2SAHB is read before taking idma.lock here,
	 * unlike the writes - confirm whether that window matters on SMP.
	 */
	u32 val = readl(idma.regs + I2SAHB);

	spin_lock(&idma.lock);

	switch (op) {
	case LPAM_DMA_START:
		val |= (AHB_INTENLVL0 | AHB_DMAEN);
		break;
	case LPAM_DMA_STOP:
		val &= ~(AHB_INTENLVL0 | AHB_DMAEN);
		break;
	default:
		spin_unlock(&idma.lock);
		return;
	}

	writel(val, idma.regs + I2SAHB);

	spin_unlock(&idma.lock);
}

/* Period-done callback: notify ALSA only while the stream is running. */
static void idma_done(void *id, int bytes_xfer)
{
	struct snd_pcm_substream *substream = id;
	struct idma_ctrl *prtd = substream->runtime->private_data;

	if (prtd && (prtd->state & ST_RUNNING))
		snd_pcm_period_elapsed(substream);
}

/* Route I2S TX through the internal DMA and record buffer/period geometry. */
static int idma_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 mod = readl(idma.regs + I2SMOD);
	u32 ahb = readl(idma.regs + I2SAHB);

	ahb |= (AHB_DMARLD | AHB_INTMASK);
	mod |= MOD_TXS_IDMA;
	writel(ahb, idma.regs + I2SAHB);
	writel(mod, idma.regs + I2SMOD);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	runtime->dma_bytes = params_buffer_bytes(params);

	prtd->start = prtd->pos = runtime->dma_addr;
	prtd->period = params_periods(params);
	prtd->periodsz = params_period_bytes(params);
	prtd->end = runtime->dma_addr + runtime->dma_bytes;

	idma_setcallbk(substream, idma_done);

	return 0;
}

static int idma_hw_free(struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);

	return 0;
}

/* Reset the position, stop any running transfer, and re-program the engine. */
static int idma_prepare(struct snd_pcm_substream *substream)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	prtd->pos = prtd->start;

	/* flush the DMA channel */
	idma_control(LPAM_DMA_STOP);
	idma_enqueue(substream);

	return 0;
}

/* ALSA trigger: map start/stop/pause/resume onto the DMA enable bit. */
static int idma_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;
	int ret = 0;

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		prtd->state |= ST_RUNNING;
		idma_control(LPAM_DMA_START);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		prtd->state &= ~ST_RUNNING;
		idma_control(LPAM_DMA_STOP);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}

/* Report the hardware playback position in frames. */
static snd_pcm_uframes_t
	idma_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;
	dma_addr_t src;
	unsigned long res;

	spin_lock(&prtd->lock);

	idma_getpos(&src);
	res = src - prtd->start;

	spin_unlock(&prtd->lock);

	return bytes_to_frames(substream->runtime, res);
}

/* Map the (uncached, iomem-backed) LP buffer into userspace. */
static int idma_mmap(struct snd_pcm_substream *substream,
	struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;
	int ret;

	/* From snd_pcm_lib_mmap_iomem */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_IO;
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = io_remap_pfn_range(vma, vma->vm_start,
			(runtime->dma_addr + offset) >> PAGE_SHIFT,
			size, vma->vm_page_prot);

	return ret;
}

/*
 * IRQ handler: acknowledge the level-0 interrupt, advance the interrupt
 * address by one period (wrapping inside the buffer), and fire the
 * period-elapsed callback.
 */
static irqreturn_t iis_irq(int irqno, void *dev_id)
{
	struct idma_ctrl *prtd = (struct idma_ctrl *)dev_id;
	u32 iiscon, iisahb, val, addr;

	iisahb  = readl(idma.regs + I2SAHB);
	iiscon  = readl(idma.regs + I2SCON);

	val = (iisahb & AHB_LVL0INT) ? AHB_CLRLVL0INT : 0;

	if (val) {
		iisahb |= val;
		writel(iisahb, idma.regs + I2SAHB);

		addr = readl(idma.regs + I2SLVL0ADDR) - idma.lp_tx_addr;
		addr += prtd->periodsz;
		addr %= (prtd->end - prtd->start);
		addr += idma.lp_tx_addr;

		writel(addr, idma.regs + I2SLVL0ADDR);

		if (prtd->cb)
			prtd->cb(prtd->token, prtd->period);
	}

	return IRQ_HANDLED;
}

/* Allocate per-stream state and claim the I2S0 interrupt. */
static int idma_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd;
	int ret;

	snd_soc_set_runtime_hwparams(substream, &idma_hardware);

	prtd = kzalloc(sizeof(struct idma_ctrl), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	ret = request_irq(IRQ_I2S0, iis_irq, 0, "i2s", prtd);
	if (ret < 0) {
		pr_err("fail to claim i2s irq , ret = %d\n", ret);
		kfree(prtd);
		return ret;
	}

	spin_lock_init(&prtd->lock);

	runtime->private_data = prtd;

	return 0;
}

/* Release the IRQ and per-stream state. */
static int idma_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;

	free_irq(IRQ_I2S0, prtd);

	/*
	 * NOTE(review): prtd is passed to free_irq *before* this NULL check;
	 * if prtd could actually be NULL the free_irq above would already be
	 * wrong.  Looks like dead defensiveness - confirm before reordering.
	 */
	if (!prtd)
		pr_err("idma_close called with prtd == NULL\n");

	kfree(prtd);

	return 0;
}

static struct snd_pcm_ops idma_ops = {
	.open		= idma_open,
	.close		= idma_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.trigger	= idma_trigger,
	.pointer	= idma_pointer,
	.mmap		= idma_mmap,
	.hw_params	= idma_hw_params,
	.hw_free	= idma_hw_free,
	.prepare	= idma_prepare,
};

/* Tear down the ioremapped playback buffer created by preallocate_idma_buffer. */
static void idma_free(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (!substream)
		return;

	buf = &substream->dma_buffer;
	if (!buf->area)
		return;

	iounmap(buf->area);

	buf->area = NULL;
	buf->addr = 0;
}

/* Point the substream's dma_buffer at the fixed low-power TX region. */
static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;

	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;

	/* Assign PCM buffer pointers */
	buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;
	buf->addr = idma.lp_tx_addr;
	buf->bytes = idma_hardware.buffer_bytes_max;
	buf->area = (unsigned char *)ioremap(buf->addr, buf->bytes);

	return 0;
}

static u64 idma_mask = DMA_BIT_MASK(32);

/* pcm_new callback: set DMA masks and preallocate the playback buffer. */
static int idma_new(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret = 0;

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &idma_mask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = preallocate_idma_buffer(pcm,
				SNDRV_PCM_STREAM_PLAYBACK);
	}

	return ret;
}

/* Called by the I2S driver to hand over its register base and LP buffer address. */
void idma_reg_addr_init(void __iomem *regs, dma_addr_t addr)
{
	spin_lock_init(&idma.lock);
	idma.regs = regs;
	idma.lp_tx_addr = addr;
}

static struct snd_soc_platform_driver asoc_idma_platform = {
	.ops = &idma_ops,
	.pcm_new = idma_new,
	.pcm_free = idma_free,
};

static int asoc_idma_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &asoc_idma_platform);
}

static int asoc_idma_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}

static struct platform_driver asoc_idma_driver = {
	.driver = {
		.name = "samsung-idma",
		.owner = THIS_MODULE,
	},

	.probe = asoc_idma_platform_probe,
	.remove = asoc_idma_platform_remove,
};

module_platform_driver(asoc_idma_driver);

MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("Samsung ASoC IDMA Driver");
MODULE_LICENSE("GPL");
gpl-2.0
ASAZING/android_kernel_lanix_l900
fs/open.c
160
24872
/*
 * linux/fs/open.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/string.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/namei.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
#include <linux/securebits.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/rcupdate.h>
#include <linux/audit.h>
#include <linux/falloc.h>
#include <linux/fs_struct.h>
#include <linux/ima.h>
#include <linux/dnotify.h>
#include <linux/compat.h>

#include "internal.h"

/*
 * Apply an ATTR_SIZE change via notify_change() under i_mutex; also strips
 * suid/sgid bits.  Callers must already hold write access on the mount.
 */
int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
	struct file *filp)
{
	int ret;
	struct iattr newattrs;

	/* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
	if (length < 0)
		return -EINVAL;

	newattrs.ia_size = length;
	newattrs.ia_valid = ATTR_SIZE | time_attrs;
	if (filp) {
		newattrs.ia_file = filp;
		newattrs.ia_valid |= ATTR_FILE;
	}

	/* Remove suid/sgid on truncate too */
	ret = should_remove_suid(dentry);
	if (ret)
		newattrs.ia_valid |= ret | ATTR_FORCE;

	mutex_lock(&dentry->d_inode->i_mutex);
	ret = notify_change(dentry, &newattrs);
	mutex_unlock(&dentry->d_inode->i_mutex);
	return ret;
}

/*
 * Path-based truncate: permission, append-flag, lease and lock checks,
 * then do_truncate().  Write access is held across the whole sequence.
 */
long vfs_truncate(struct path *path, loff_t length)
{
	struct inode *inode;
	long error;

	inode = path->dentry->d_inode;

	/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
	if (S_ISDIR(inode->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	error = mnt_want_write(path->mnt);
	if (error)
		goto out;

	error = inode_permission(inode, MAY_WRITE);
	if (error)
		goto mnt_drop_write_and_out;

	error = -EPERM;
	if (IS_APPEND(inode))
		goto mnt_drop_write_and_out;

	error = get_write_access(inode);
	if (error)
		goto mnt_drop_write_and_out;

	/*
	 * Make sure that there are no leases.  get_write_access() protects
	 * against the truncate racing with a lease-granting setlease().
	 */
	error = break_lease(inode, O_WRONLY);
	if (error)
		goto put_write_and_out;

	error = locks_verify_truncate(inode, NULL, length);
	if (!error)
		error = security_path_truncate(path);
	if (!error)
		error = do_truncate(path->dentry, length, 0, NULL);

put_write_and_out:
	put_write_access(inode);
mnt_drop_write_and_out:
	mnt_drop_write(path->mnt);
out:
	return error;
}
EXPORT_SYMBOL_GPL(vfs_truncate);

/* Look up the path (retrying on ESTALE) and truncate it. */
static long do_sys_truncate(const char __user *pathname, loff_t length)
{
	unsigned int lookup_flags = LOOKUP_FOLLOW;
	struct path path;
	int error;

	if (length < 0)	/* sorry, but loff_t says... */
		return -EINVAL;

retry:
	error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path);
	if (!error) {
		error = vfs_truncate(&path, length);
		path_put(&path);
	}
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
{
	return do_sys_truncate(path, length);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length)
{
	return do_sys_truncate(path, length);
}
#endif

/*
 * fd-based truncate; 'small' means the caller is a non-LFS 32-bit entry
 * point and the length must fit in 2^31 unless O_LARGEFILE was used.
 */
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
	struct inode *inode;
	struct dentry *dentry;
	struct fd f;
	int error;

	error = -EINVAL;
	if (length < 0)
		goto out;
	error = -EBADF;
	f = fdget(fd);
	if (!f.file)
		goto out;

	/* explicitly opened as large or we are on 64-bit box */
	if (f.file->f_flags & O_LARGEFILE)
		small = 0;

	dentry = f.file->f_path.dentry;
	inode = dentry->d_inode;
	error = -EINVAL;
	if (!S_ISREG(inode->i_mode) || !(f.file->f_mode & FMODE_WRITE))
		goto out_putf;

	error = -EINVAL;
	/* Cannot ftruncate over 2^31 bytes without large file support */
	if (small && length > MAX_NON_LFS)
		goto out_putf;

	error = -EPERM;
	if (IS_APPEND(inode))
		goto out_putf;

	sb_start_write(inode->i_sb);
	error = locks_verify_truncate(inode, f.file, length);
	if (!error)
		error = security_path_truncate(&f.file->f_path);
	if (!error)
		error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
	sb_end_write(inode->i_sb);
out_putf:
	fdput(f);
out:
	return error;
}

SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
{
	return do_sys_ftruncate(fd, length, 1);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
{
	return do_sys_ftruncate(fd, length, 1);
}
#endif

/* LFS versions of truncate are only needed on 32 bit machines */
#if BITS_PER_LONG == 32
SYSCALL_DEFINE2(truncate64, const char __user *, path, loff_t, length)
{
	return do_sys_truncate(path, length);
}

SYSCALL_DEFINE2(ftruncate64, unsigned int, fd, loff_t, length)
{
	return do_sys_ftruncate(fd, length, 0);
}
#endif /* BITS_PER_LONG == 32 */

/*
 * Validate mode/offset/len and file state, then dispatch to the
 * filesystem's ->fallocate().  Freeze-protected via sb_start_write().
 */
int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	if (offset < 0 || len <= 0)
		return -EINVAL;

	/* Return error if mode is not supported */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	/* Punch hole must have keep size set */
	if ((mode & FALLOC_FL_PUNCH_HOLE) &&
	    !(mode & FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	/* It's not possible punch hole on append only file */
	if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
		return -EPERM;

	if (IS_IMMUTABLE(inode))
		return -EPERM;

	/*
	 * Revalidate the write permissions, in case security policy has
	 * changed since the files were opened.
	 */
	ret = security_file_permission(file, MAY_WRITE);
	if (ret)
		return ret;

	if (S_ISFIFO(inode->i_mode))
		return -ESPIPE;

	/*
	 * Let individual file system decide if it supports preallocation
	 * for directories or not.
	 */
	if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
		return -ENODEV;

	/* Check for wrap through zero too */
	if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0))
		return -EFBIG;

	if (!file->f_op->fallocate)
		return -EOPNOTSUPP;

	sb_start_write(inode->i_sb);
	ret = file->f_op->fallocate(file, mode, offset, len);
	sb_end_write(inode->i_sb);
	return ret;
}

SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len)
{
	struct fd f = fdget(fd);
	int error = -EBADF;

	if (f.file) {
		error = do_fallocate(f.file, mode, offset, len);
		fdput(f);
	}
	return error;
}

/*
 * access() needs to use the real uid/gid, not the effective uid/gid.
 * We do this by temporarily clearing all FS-related capabilities and
 * switching the fsuid/fsgid around to the real ones.
 */
SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
{
	const struct cred *old_cred;
	struct cred *override_cred;
	struct path path;
	struct inode *inode;
	int res;
	unsigned int lookup_flags = LOOKUP_FOLLOW;

	if (mode & ~S_IRWXO)	/* where's F_OK, X_OK, W_OK, R_OK? */
		return -EINVAL;

	override_cred = prepare_creds();
	if (!override_cred)
		return -ENOMEM;

	/* check with the *real* ids, not the effective ones */
	override_cred->fsuid = override_cred->uid;
	override_cred->fsgid = override_cred->gid;

	if (!issecure(SECURE_NO_SETUID_FIXUP)) {
		/* Clear the capabilities if we switch to a non-root user */
		kuid_t root_uid = make_kuid(override_cred->user_ns, 0);
		if (!uid_eq(override_cred->uid, root_uid))
			cap_clear(override_cred->cap_effective);
		else
			override_cred->cap_effective =
				override_cred->cap_permitted;
	}

	old_cred = override_creds(override_cred);
retry:
	res = user_path_at(dfd, filename, lookup_flags, &path);
	if (res)
		goto out;

	inode = path.dentry->d_inode;

	if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) {
		/*
		 * MAY_EXEC on regular files is denied if the fs is mounted
		 * with the "noexec" flag.
		 */
		res = -EACCES;
		if (path.mnt->mnt_flags & MNT_NOEXEC)
			goto out_path_release;
	}

	res = inode_permission(inode, mode | MAY_ACCESS);
	/* SuS v2 requires we report a read only fs too */
	if (res || !(mode & S_IWOTH) || special_file(inode->i_mode))
		goto out_path_release;
	/*
	 * This is a rare case where using __mnt_is_readonly()
	 * is OK without a mnt_want/drop_write() pair.  Since
	 * no actual write to the fs is performed here, we do
	 * not need to telegraph to that to anyone.
	 *
	 * By doing this, we accept that this access is
	 * inherently racy and know that the fs may change
	 * state before we even see this result.
	 */
	if (__mnt_is_readonly(path.mnt))
		res = -EROFS;

out_path_release:
	path_put(&path);
	if (retry_estale(res, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	revert_creds(old_cred);
	put_cred(override_cred);
	return res;
}

SYSCALL_DEFINE2(access, const char __user *, filename, int, mode)
{
	return sys_faccessat(AT_FDCWD, filename, mode);
}

/* Change cwd to a path; requires exec ("search") permission on it. */
SYSCALL_DEFINE1(chdir, const char __user *, filename)
{
	struct path path;
	int error;
	unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
retry:
	error = user_path_at(AT_FDCWD, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
	if (error)
		goto dput_and_out;

	set_fs_pwd(current->fs, &path);

dput_and_out:
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}

/* Change cwd to the directory referenced by an open fd. */
SYSCALL_DEFINE1(fchdir, unsigned int, fd)
{
	struct fd f = fdget_raw(fd);
	struct inode *inode;
	int error = -EBADF;

	error = -EBADF;
	if (!f.file)
		goto out;

	inode = file_inode(f.file);

	error = -ENOTDIR;
	if (!S_ISDIR(inode->i_mode))
		goto out_putf;

	error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
	if (!error)
		set_fs_pwd(current->fs, &f.file->f_path);
out_putf:
	fdput(f);
out:
	return error;
}

/* Change the root directory; needs CAP_SYS_CHROOT in the caller's user ns. */
SYSCALL_DEFINE1(chroot, const char __user *, filename)
{
	struct path path;
	int error;
	unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
retry:
	error = user_path_at(AT_FDCWD, filename, lookup_flags, &path);
	if (error)
		goto out;

	error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR);
	if (error)
		goto dput_and_out;

	error = -EPERM;
	if (!nsown_capable(CAP_SYS_CHROOT))
		goto dput_and_out;
	error = security_path_chroot(&path);
	if (error)
		goto dput_and_out;

	set_fs_root(current->fs, &path);
	error = 0;
dput_and_out:
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
out:
	return error;
}

/* Definition continues beyond this chunk of the file. */
static int chmod_common(struct path *path, umode_t mode)
{
	struct inode *inode =
path->dentry->d_inode; struct iattr newattrs; int error; error = mnt_want_write(path->mnt); if (error) return error; mutex_lock(&inode->i_mutex); error = security_path_chmod(path, mode); if (error) goto out_unlock; newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; error = notify_change(path->dentry, &newattrs); out_unlock: mutex_unlock(&inode->i_mutex); mnt_drop_write(path->mnt); return error; } SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) { struct file * file; int err = -EBADF; file = fget(fd); if (file) { audit_inode(NULL, file->f_path.dentry, 0); err = chmod_common(&file->f_path, mode); fput(file); } return err; } SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (!error) { error = chmod_common(&path, mode); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } } return error; } SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode) { return sys_fchmodat(AT_FDCWD, filename, mode); } static int chown_common(struct path *path, uid_t user, gid_t group) { struct inode *inode = path->dentry->d_inode; int error; struct iattr newattrs; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); newattrs.ia_valid = ATTR_CTIME; if (user != (uid_t) -1) { if (!uid_valid(uid)) return -EINVAL; newattrs.ia_valid |= ATTR_UID; newattrs.ia_uid = uid; } if (group != (gid_t) -1) { if (!gid_valid(gid)) return -EINVAL; newattrs.ia_valid |= ATTR_GID; newattrs.ia_gid = gid; } if (!S_ISDIR(inode->i_mode)) newattrs.ia_valid |= ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; mutex_lock(&inode->i_mutex); error = security_path_chown(path, uid, gid); if (!error) error = notify_change(path->dentry, &newattrs); 
mutex_unlock(&inode->i_mutex); return error; } SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag) { struct path path; int error = -EINVAL; int lookup_flags; if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) goto out; lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW; if (flag & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) goto out; error = mnt_want_write(path.mnt); if (error) goto out_release; error = chown_common(&path, user, group); mnt_drop_write(path.mnt); out_release: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group) { return sys_fchownat(AT_FDCWD, filename, user, group, 0); } SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group) { return sys_fchownat(AT_FDCWD, filename, user, group, AT_SYMLINK_NOFOLLOW); } SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group) { struct fd f = fdget(fd); int error = -EBADF; if (!f.file) goto out; error = mnt_want_write_file(f.file); if (error) goto out_fput; audit_inode(NULL, f.file->f_path.dentry, 0); error = chown_common(&f.file->f_path, user, group); mnt_drop_write_file(f.file); out_fput: fdput(f); out: return error; } /* * You have to be very careful that these write * counts get cleaned up in error cases and * upon __fput(). This should probably never * be called outside of __dentry_open(). 
*/ static inline int __get_file_write_access(struct inode *inode, struct vfsmount *mnt) { int error = get_write_access(inode); if (error) return error; error = __mnt_want_write(mnt); if (error) put_write_access(inode); return error; } int open_check_o_direct(struct file *f) { /* NB: we're sure to have correct a_ops only after f_op->open */ if (f->f_flags & O_DIRECT) { if (!f->f_mapping->a_ops || ((!f->f_mapping->a_ops->direct_IO) && (!f->f_mapping->a_ops->get_xip_mem))) { return -EINVAL; } } return 0; } static int do_dentry_open(struct file *f, int (*open)(struct inode *, struct file *), const struct cred *cred) { static const struct file_operations empty_fops = {}; struct inode *inode; int error; f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; if (unlikely(f->f_flags & O_PATH)) f->f_mode = FMODE_PATH; path_get(&f->f_path); inode = f->f_inode = f->f_path.dentry->d_inode; if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { error = __get_file_write_access(inode, f->f_path.mnt); if (error) goto cleanup_file; file_take_write(f); } f->f_mapping = inode->i_mapping; if (unlikely(f->f_mode & FMODE_PATH)) { f->f_op = &empty_fops; return 0; } f->f_op = fops_get(inode->i_fop); error = security_file_open(f, cred); if (error) goto cleanup_all; error = break_lease(inode, f->f_flags); if (error) goto cleanup_all; if (!open && f->f_op) open = f->f_op->open; if (open) { error = open(inode, f); if (error) goto cleanup_all; } if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_inc(inode); f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); return 0; cleanup_all: fops_put(f->f_op); if (f->f_mode & FMODE_WRITE) { if (!special_file(inode->i_mode)) { /* * We don't consider this a real * mnt_want/drop_write() pair * because it all happenend right * here, so just reset the state. 
*/ put_write_access(inode); file_reset_write(f); __mnt_drop_write(f->f_path.mnt); } } cleanup_file: path_put(&f->f_path); f->f_path.mnt = NULL; f->f_path.dentry = NULL; f->f_inode = NULL; return error; } /** * finish_open - finish opening a file * @od: opaque open data * @dentry: pointer to dentry * @open: open callback * * This can be used to finish opening a file passed to i_op->atomic_open(). * * If the open callback is set to NULL, then the standard f_op->open() * filesystem callback is substituted. */ int finish_open(struct file *file, struct dentry *dentry, int (*open)(struct inode *, struct file *), int *opened) { int error; BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ file->f_path.dentry = dentry; error = do_dentry_open(file, open, current_cred()); if (!error) *opened |= FILE_OPENED; return error; } EXPORT_SYMBOL(finish_open); /** * finish_no_open - finish ->atomic_open() without opening the file * * @od: opaque open data * @dentry: dentry or NULL (as returned from ->lookup()) * * This can be used to set the result of a successful lookup in ->atomic_open(). * The filesystem's atomic_open() method shall return NULL after calling this. */ int finish_no_open(struct file *file, struct dentry *dentry) { file->f_path.dentry = dentry; return 1; } EXPORT_SYMBOL(finish_no_open); struct file *dentry_open(const struct path *path, int flags, const struct cred *cred) { int error; struct file *f; validate_creds(cred); /* We must always pass in a valid mount pointer. 
*/ BUG_ON(!path->mnt); f = get_empty_filp(); if (!IS_ERR(f)) { f->f_flags = flags; f->f_path = *path; error = do_dentry_open(f, NULL, cred); if (!error) { /* from now on we need fput() to dispose of f */ error = open_check_o_direct(f); if (error) { fput(f); f = ERR_PTR(error); } } else { put_filp(f); f = ERR_PTR(error); } } return f; } EXPORT_SYMBOL(dentry_open); static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op) { int lookup_flags = 0; int acc_mode; if (flags & O_CREAT) op->mode = (mode & S_IALLUGO) | S_IFREG; else op->mode = 0; /* Must never be set by userspace */ flags &= ~FMODE_NONOTIFY & ~O_CLOEXEC; /* * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only * check for O_DSYNC if the need any syncing at all we enforce it's * always set instead of having to deal with possibly weird behaviour * for malicious applications setting only __O_SYNC. */ if (flags & __O_SYNC) flags |= O_DSYNC; /* * If we have O_PATH in the open flag. Then we * cannot have anything other than the below set of flags */ if (flags & O_PATH) { flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH; acc_mode = 0; } else { acc_mode = MAY_OPEN | ACC_MODE(flags); } op->open_flag = flags; /* O_TRUNC implies we need access checks for write permissions */ if (flags & O_TRUNC) acc_mode |= MAY_WRITE; /* Allow the LSM permission hook to distinguish append access from general write access. */ if (flags & O_APPEND) acc_mode |= MAY_APPEND; op->acc_mode = acc_mode; op->intent = flags & O_PATH ? 
0 : LOOKUP_OPEN; if (flags & O_CREAT) { op->intent |= LOOKUP_CREATE; if (flags & O_EXCL) op->intent |= LOOKUP_EXCL; } if (flags & O_DIRECTORY) lookup_flags |= LOOKUP_DIRECTORY; if (!(flags & O_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; return lookup_flags; } /** * file_open_name - open file and return file pointer * * @name: struct filename containing path to open * @flags: open flags as per the open(2) second argument * @mode: mode for the new file if O_CREAT is set, else ignored * * This is the helper to open a file from kernelspace if you really * have to. But in generally you should not do this, so please move * along, nothing to see here.. */ struct file *file_open_name(struct filename *name, int flags, umode_t mode) { struct open_flags op; int lookup = build_open_flags(flags, mode, &op); return do_filp_open(AT_FDCWD, name, &op, lookup); } /** * filp_open - open file and return file pointer * * @filename: path to open * @flags: open flags as per the open(2) second argument * @mode: mode for the new file if O_CREAT is set, else ignored * * This is the helper to open a file from kernelspace if you really * have to. But in generally you should not do this, so please move * along, nothing to see here.. 
*/ struct file *filp_open(const char *filename, int flags, umode_t mode) { struct filename name = {.name = filename}; return file_open_name(&name, flags, mode); } EXPORT_SYMBOL(filp_open); struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt, const char *filename, int flags) { struct open_flags op; int lookup = build_open_flags(flags, 0, &op); if (flags & O_CREAT) return ERR_PTR(-EINVAL); if (!filename && (flags & O_DIRECTORY)) if (!dentry->d_inode->i_op->lookup) return ERR_PTR(-ENOTDIR); return do_file_open_root(dentry, mnt, filename, &op, lookup); } EXPORT_SYMBOL(file_open_root); long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode) { struct open_flags op; int lookup = build_open_flags(flags, mode, &op); struct filename *tmp = getname(filename); int fd = PTR_ERR(tmp); if (!IS_ERR(tmp)) { fd = get_unused_fd_flags(flags); if (fd >= 0) { struct file *f = do_filp_open(dfd, tmp, &op, lookup); if (IS_ERR(f)) { put_unused_fd(fd); fd = PTR_ERR(f); } else { fsnotify_open(f); fd_install(fd, f); } } putname(tmp); } return fd; } SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode) { if (force_o_largefile()) flags |= O_LARGEFILE; return do_sys_open(AT_FDCWD, filename, flags, mode); } SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags, umode_t, mode) { if (force_o_largefile()) flags |= O_LARGEFILE; return do_sys_open(dfd, filename, flags, mode); } #ifndef __alpha__ /* * For backward compatibility? Maybe this should be moved * into arch/i386 instead? */ SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode) { return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode); } #endif /* * "id" is the POSIX thread ID. We use the * files pointer for this.. 
*/ int filp_close(struct file *filp, fl_owner_t id) { int retval = 0; if (!file_count(filp)) { printk(KERN_ERR "VFS: Close: file count is 0\n"); return 0; } if (filp->f_op && filp->f_op->flush) retval = filp->f_op->flush(filp, id); if (likely(!(filp->f_mode & FMODE_PATH))) { dnotify_flush(filp, id); locks_remove_posix(filp, id); } fput(filp); return retval; } EXPORT_SYMBOL(filp_close); /* * Careful here! We test whether the file pointer is NULL before * releasing the fd. This ensures that one clone task can't release * an fd while another clone is opening it. */ SYSCALL_DEFINE1(close, unsigned int, fd) { int retval = __close_fd(current->files, fd); /* can't restart close syscall because file table entry was cleared */ if (unlikely(retval == -ERESTARTSYS || retval == -ERESTARTNOINTR || retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; return retval; } EXPORT_SYMBOL(sys_close); /* * This routine simulates a hangup on the tty, to arrange that users * are given clean terminals at login time. */ SYSCALL_DEFINE0(vhangup) { if (capable(CAP_SYS_TTY_CONFIG)) { tty_vhangup_self(); return 0; } return -EPERM; } /* * Called when an inode is about to be open. * We use this to disallow opening large files on 32bit systems if * the caller didn't specify O_LARGEFILE. On 64bit systems we force * on this flag in sys_open. */ int generic_file_open(struct inode * inode, struct file * filp) { if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) return -EOVERFLOW; return 0; } EXPORT_SYMBOL(generic_file_open); /* * This is used by subsystems that don't want seekable * file descriptors. The function is not supposed to ever fail, the only * reason it returns an 'int' and not 'void' is so that it can be plugged * directly into file_operations structure. */ int nonseekable_open(struct inode *inode, struct file *filp) { filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); return 0; } EXPORT_SYMBOL(nonseekable_open);
gpl-2.0
SmithGitHu/linux
arch/arm/mach-shmobile/clock.c
416
2081
/* * SH-Mobile Clock Framework * * Copyright (C) 2010 Magnus Damm * * Used together with arch/arm/common/clkdev.c and drivers/sh/clk.c. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/init.h> #ifdef CONFIG_COMMON_CLK #include <linux/clk.h> #include <linux/clkdev.h> #include "clock.h" void __init shmobile_clk_workaround(const struct clk_name *clks, int nr_clks, bool enable) { const struct clk_name *clkn; struct clk *clk; unsigned int i; for (i = 0; i < nr_clks; ++i) { clkn = clks + i; clk = clk_get(NULL, clkn->clk); if (!IS_ERR(clk)) { clk_register_clkdev(clk, clkn->con_id, clkn->dev_id); if (enable) clk_prepare_enable(clk); clk_put(clk); } } } #else /* CONFIG_COMMON_CLK */ #include <linux/sh_clk.h> #include <linux/export.h> #include "clock.h" #include "common.h" unsigned long shmobile_fixed_ratio_clk_recalc(struct clk *clk) { struct clk_ratio *p = clk->priv; return clk->parent->rate / p->div * p->mul; }; struct sh_clk_ops shmobile_fixed_ratio_clk_ops = { .recalc = shmobile_fixed_ratio_clk_recalc, }; int __init shmobile_clk_init(void) { /* Kick the child clocks.. */ recalculate_root_clocks(); /* Enable the necessary init clocks */ clk_enable_init_clocks(); return 0; } int __clk_get(struct clk *clk) { return 1; } EXPORT_SYMBOL(__clk_get); void __clk_put(struct clk *clk) { } EXPORT_SYMBOL(__clk_put); #endif /* CONFIG_COMMON_CLK */
gpl-2.0
elkingtonmcb/linux
drivers/media/usb/dvb-usb/dib0700_core.c
672
22970
/* Linux driver for devices based on the DiBcom DiB0700 USB bridge * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * Copyright (C) 2005-6 DiBcom, SA */ #include "dib0700.h" /* debug */ int dvb_usb_dib0700_debug; module_param_named(debug,dvb_usb_dib0700_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,2=fw,4=fwdata,8=data (or-able))." DVB_USB_DEBUG_STATUS); static int nb_packet_buffer_size = 21; module_param(nb_packet_buffer_size, int, 0644); MODULE_PARM_DESC(nb_packet_buffer_size, "Set the dib0700 driver data buffer size. This parameter " "corresponds to the number of TS packets. The actual size of " "the data buffer corresponds to this parameter " "multiplied by 188 (default: 21)"); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); int dib0700_get_version(struct dvb_usb_device *d, u32 *hwversion, u32 *romversion, u32 *ramversion, u32 *fwtype) { struct dib0700_state *st = d->priv; int ret; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), REQUEST_GET_VERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, st->buf, 16, USB_CTRL_GET_TIMEOUT); if (hwversion != NULL) *hwversion = (st->buf[0] << 24) | (st->buf[1] << 16) | (st->buf[2] << 8) | st->buf[3]; if (romversion != NULL) *romversion = (st->buf[4] << 24) | (st->buf[5] << 16) | (st->buf[6] << 8) | st->buf[7]; if (ramversion != NULL) *ramversion = (st->buf[8] << 24) | (st->buf[9] << 16) | (st->buf[10] << 8) | st->buf[11]; if (fwtype != NULL) *fwtype = (st->buf[12] << 24) | (st->buf[13] << 16) | (st->buf[14] << 8) | st->buf[15]; mutex_unlock(&d->usb_mutex); return ret; } /* expecting rx buffer: request data[0] data[1] ... 
data[2] */ static int dib0700_ctrl_wr(struct dvb_usb_device *d, u8 *tx, u8 txlen) { int status; deb_data(">>> "); debug_dump(tx, txlen, deb_data); status = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev,0), tx[0], USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, tx, txlen, USB_CTRL_GET_TIMEOUT); if (status != txlen) deb_data("ep 0 write error (status = %d, len: %d)\n",status,txlen); return status < 0 ? status : 0; } /* expecting tx buffer: request data[0] ... data[n] (n <= 4) */ int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) { u16 index, value; int status; if (txlen < 2) { err("tx buffer length is smaller than 2. Makes no sense."); return -EINVAL; } if (txlen > 4) { err("tx buffer length is larger than 4. Not supported."); return -EINVAL; } deb_data(">>> "); debug_dump(tx,txlen,deb_data); value = ((txlen - 2) << 8) | tx[1]; index = 0; if (txlen > 2) index |= (tx[2] << 8); if (txlen > 3) index |= tx[3]; status = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev,0), tx[0], USB_TYPE_VENDOR | USB_DIR_IN, value, index, rx, rxlen, USB_CTRL_GET_TIMEOUT); if (status < 0) deb_info("ep 0 read error (status = %d)\n",status); deb_data("<<< "); debug_dump(rx, rxlen, deb_data); return status; /* length in case of success */ } int dib0700_set_gpio(struct dvb_usb_device *d, enum dib07x0_gpios gpio, u8 gpio_dir, u8 gpio_val) { struct dib0700_state *st = d->priv; int ret; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_GPIO; st->buf[1] = gpio; st->buf[2] = ((gpio_dir & 0x01) << 7) | ((gpio_val & 0x01) << 6); ret = dib0700_ctrl_wr(d, st->buf, 3); mutex_unlock(&d->usb_mutex); return ret; } static int dib0700_set_usb_xfer_len(struct dvb_usb_device *d, u16 nb_ts_packets) { struct dib0700_state *st = d->priv; int ret; if (st->fw_version >= 0x10201) { if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_USB_XFER_LEN; 
st->buf[1] = (nb_ts_packets >> 8) & 0xff; st->buf[2] = nb_ts_packets & 0xff; deb_info("set the USB xfer len to %i Ts packet\n", nb_ts_packets); ret = dib0700_ctrl_wr(d, st->buf, 3); mutex_unlock(&d->usb_mutex); } else { deb_info("this firmware does not allow to change the USB xfer len\n"); ret = -EIO; } return ret; } /* * I2C master xfer function (supported in 1.20 firmware) */ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { /* The new i2c firmware messages are more reliable and in particular properly support i2c read calls not preceded by a write */ struct dvb_usb_device *d = i2c_get_adapdata(adap); struct dib0700_state *st = d->priv; uint8_t bus_mode = 1; /* 0=eeprom bus, 1=frontend bus */ uint8_t gen_mode = 0; /* 0=master i2c, 1=gpio i2c */ uint8_t en_start = 0; uint8_t en_stop = 0; int result, i; /* Ensure nobody else hits the i2c bus while we're sending our sequence of messages, (such as the remote control thread) */ if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EINTR; for (i = 0; i < num; i++) { if (i == 0) { /* First message in the transaction */ en_start = 1; } else if (!(msg[i].flags & I2C_M_NOSTART)) { /* Device supports repeated-start */ en_start = 1; } else { /* Not the first packet and device doesn't support repeated start */ en_start = 0; } if (i == (num - 1)) { /* Last message in the transaction */ en_stop = 1; } if (msg[i].flags & I2C_M_RD) { /* Read request */ u16 index, value; uint8_t i2c_dest; i2c_dest = (msg[i].addr << 1); value = ((en_start << 7) | (en_stop << 6) | (msg[i].len & 0x3F)) << 8 | i2c_dest; /* I2C ctrl + FE bus; */ index = ((gen_mode << 6) & 0xC0) | ((bus_mode << 4) & 0x30); result = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), REQUEST_NEW_I2C_READ, USB_TYPE_VENDOR | USB_DIR_IN, value, index, msg[i].buf, msg[i].len, USB_CTRL_GET_TIMEOUT); if (result < 0) { deb_info("i2c read error (status = %d)\n", result); break; } deb_data("<<< "); debug_dump(msg[i].buf, msg[i].len, 
deb_data); } else { /* Write request */ if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); mutex_unlock(&d->i2c_mutex); return -EINTR; } st->buf[0] = REQUEST_NEW_I2C_WRITE; st->buf[1] = msg[i].addr << 1; st->buf[2] = (en_start << 7) | (en_stop << 6) | (msg[i].len & 0x3F); /* I2C ctrl + FE bus; */ st->buf[3] = ((gen_mode << 6) & 0xC0) | ((bus_mode << 4) & 0x30); /* The Actual i2c payload */ memcpy(&st->buf[4], msg[i].buf, msg[i].len); deb_data(">>> "); debug_dump(st->buf, msg[i].len + 4, deb_data); result = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), REQUEST_NEW_I2C_WRITE, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, st->buf, msg[i].len + 4, USB_CTRL_GET_TIMEOUT); mutex_unlock(&d->usb_mutex); if (result < 0) { deb_info("i2c write error (status = %d)\n", result); break; } } } mutex_unlock(&d->i2c_mutex); return i; } /* * I2C master xfer function (pre-1.20 firmware) */ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct dib0700_state *st = d->priv; int i,len; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EINTR; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); mutex_unlock(&d->i2c_mutex); return -EINTR; } for (i = 0; i < num; i++) { /* fill in the address */ st->buf[1] = msg[i].addr << 1; /* fill the buffer */ memcpy(&st->buf[2], msg[i].buf, msg[i].len); /* write/read request */ if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { st->buf[0] = REQUEST_I2C_READ; st->buf[1] |= 1; /* special thing in the current firmware: when length is zero the read-failed */ len = dib0700_ctrl_rd(d, st->buf, msg[i].len + 2, msg[i+1].buf, msg[i+1].len); if (len <= 0) { deb_info("I2C read failed on address 0x%02x\n", msg[i].addr); break; } msg[i+1].len = len; i++; } else { st->buf[0] = REQUEST_I2C_WRITE; if (dib0700_ctrl_wr(d, st->buf, msg[i].len + 2) < 0) break; } } mutex_unlock(&d->usb_mutex); 
mutex_unlock(&d->i2c_mutex); return i; } static int dib0700_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct dib0700_state *st = d->priv; if (st->fw_use_new_i2c_api == 1) { /* User running at least fw 1.20 */ return dib0700_i2c_xfer_new(adap, msg, num); } else { /* Use legacy calls */ return dib0700_i2c_xfer_legacy(adap, msg, num); } } static u32 dib0700_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } struct i2c_algorithm dib0700_i2c_algo = { .master_xfer = dib0700_i2c_xfer, .functionality = dib0700_i2c_func, }; int dib0700_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { s16 ret; u8 *b; b = kmalloc(16, GFP_KERNEL); if (!b) return -ENOMEM; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), REQUEST_GET_VERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, b, 16, USB_CTRL_GET_TIMEOUT); deb_info("FW GET_VERSION length: %d\n",ret); *cold = ret <= 0; deb_info("cold: %d\n", *cold); kfree(b); return 0; } static int dib0700_set_clock(struct dvb_usb_device *d, u8 en_pll, u8 pll_src, u8 pll_range, u8 clock_gpio3, u16 pll_prediv, u16 pll_loopdiv, u16 free_div, u16 dsuScaler) { struct dib0700_state *st = d->priv; int ret; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_CLOCK; st->buf[1] = (en_pll << 7) | (pll_src << 6) | (pll_range << 5) | (clock_gpio3 << 4); st->buf[2] = (pll_prediv >> 8) & 0xff; /* MSB */ st->buf[3] = pll_prediv & 0xff; /* LSB */ st->buf[4] = (pll_loopdiv >> 8) & 0xff; /* MSB */ st->buf[5] = pll_loopdiv & 0xff; /* LSB */ st->buf[6] = (free_div >> 8) & 0xff; /* MSB */ st->buf[7] = free_div & 0xff; /* LSB */ st->buf[8] = (dsuScaler >> 8) & 0xff; /* MSB */ st->buf[9] = dsuScaler & 0xff; /* LSB */ ret = dib0700_ctrl_wr(d, st->buf, 10); mutex_unlock(&d->usb_mutex); return ret; } int dib0700_set_i2c_speed(struct 
dvb_usb_device *d, u16 scl_kHz) { struct dib0700_state *st = d->priv; u16 divider; int ret; if (scl_kHz == 0) return -EINVAL; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_I2C_PARAM; divider = (u16) (30000 / scl_kHz); st->buf[1] = 0; st->buf[2] = (u8) (divider >> 8); st->buf[3] = (u8) (divider & 0xff); divider = (u16) (72000 / scl_kHz); st->buf[4] = (u8) (divider >> 8); st->buf[5] = (u8) (divider & 0xff); divider = (u16) (72000 / scl_kHz); /* clock: 72MHz */ st->buf[6] = (u8) (divider >> 8); st->buf[7] = (u8) (divider & 0xff); deb_info("setting I2C speed: %04x %04x %04x (%d kHz).", (st->buf[2] << 8) | (st->buf[3]), (st->buf[4] << 8) | st->buf[5], (st->buf[6] << 8) | st->buf[7], scl_kHz); ret = dib0700_ctrl_wr(d, st->buf, 8); mutex_unlock(&d->usb_mutex); return ret; } int dib0700_ctrl_clock(struct dvb_usb_device *d, u32 clk_MHz, u8 clock_out_gp3) { switch (clk_MHz) { case 72: dib0700_set_clock(d, 1, 0, 1, clock_out_gp3, 2, 24, 0, 0x4c); break; default: return -EINVAL; } return 0; } static int dib0700_jumpram(struct usb_device *udev, u32 address) { int ret = 0, actlen; u8 *buf; buf = kmalloc(8, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = REQUEST_JUMPRAM; buf[1] = 0; buf[2] = 0; buf[3] = 0; buf[4] = (address >> 24) & 0xff; buf[5] = (address >> 16) & 0xff; buf[6] = (address >> 8) & 0xff; buf[7] = address & 0xff; if ((ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01),buf,8,&actlen,1000)) < 0) { deb_fw("jumpram to 0x%x failed\n",address); goto out; } if (actlen != 8) { deb_fw("jumpram to 0x%x failed\n",address); ret = -EIO; goto out; } out: kfree(buf); return ret; } int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw) { struct hexline hx; int pos = 0, ret, act_len, i, adap_num; u8 *buf; u32 fw_version; buf = kmalloc(260, GFP_KERNEL); if (!buf) return -ENOMEM; while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) { deb_fwdata("writing to address 0x%08x 
(buffer: 0x%02x %02x)\n", hx.addr, hx.len, hx.chk); buf[0] = hx.len; buf[1] = (hx.addr >> 8) & 0xff; buf[2] = hx.addr & 0xff; buf[3] = hx.type; memcpy(&buf[4],hx.data,hx.len); buf[4+hx.len] = hx.chk; ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x01), buf, hx.len + 5, &act_len, 1000); if (ret < 0) { err("firmware download failed at %d with %d",pos,ret); goto out; } } if (ret == 0) { /* start the firmware */ if ((ret = dib0700_jumpram(udev, 0x70000000)) == 0) { info("firmware started successfully."); msleep(500); } } else ret = -EIO; /* the number of ts packet has to be at least 1 */ if (nb_packet_buffer_size < 1) nb_packet_buffer_size = 1; /* get the fimware version */ usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), REQUEST_GET_VERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, buf, 16, USB_CTRL_GET_TIMEOUT); fw_version = (buf[8] << 24) | (buf[9] << 16) | (buf[10] << 8) | buf[11]; /* set the buffer size - DVB-USB is allocating URB buffers * only after the firwmare download was successful */ for (i = 0; i < dib0700_device_count; i++) { for (adap_num = 0; adap_num < dib0700_devices[i].num_adapters; adap_num++) { if (fw_version >= 0x10201) { dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize = 188*nb_packet_buffer_size; } else { /* for fw version older than 1.20.1, * the buffersize has to be n times 512 */ dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize = ((188*nb_packet_buffer_size+188/2)/512)*512; if (dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize < 512) dib0700_devices[i].adapter[adap_num].fe[0].stream.u.bulk.buffersize = 512; } } } out: kfree(buf); return ret; } int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct dib0700_state *st = adap->dev->priv; int ret; if ((onoff != 0) && (st->fw_version >= 0x10201)) { /* for firmware later than 1.20.1, * the USB xfer length can be set */ ret = dib0700_set_usb_xfer_len(adap->dev, st->nb_packet_buffer_size); if (ret < 0) { deb_info("can not set the 
USB xfer len\n"); return ret; } } mutex_lock(&adap->dev->usb_mutex); st->buf[0] = REQUEST_ENABLE_VIDEO; /* this bit gives a kind of command, * rather than enabling something or not */ st->buf[1] = (onoff << 4) | 0x00; if (st->disable_streaming_master_mode == 1) st->buf[2] = 0x00; else st->buf[2] = 0x01 << 4; /* Master mode */ st->buf[3] = 0x00; deb_info("modifying (%d) streaming state for %d\n", onoff, adap->id); st->channel_state &= ~0x3; if ((adap->fe_adap[0].stream.props.endpoint != 2) && (adap->fe_adap[0].stream.props.endpoint != 3)) { deb_info("the endpoint number (%i) is not correct, use the adapter id instead", adap->fe_adap[0].stream.props.endpoint); if (onoff) st->channel_state |= 1 << (adap->id); else st->channel_state |= 1 << ~(adap->id); } else { if (onoff) st->channel_state |= 1 << (adap->fe_adap[0].stream.props.endpoint-2); else st->channel_state |= 1 << (3-adap->fe_adap[0].stream.props.endpoint); } st->buf[2] |= st->channel_state; deb_info("data for streaming: %x %x\n", st->buf[1], st->buf[2]); ret = dib0700_ctrl_wr(adap->dev, st->buf, 4); mutex_unlock(&adap->dev->usb_mutex); return ret; } int dib0700_change_protocol(struct rc_dev *rc, u64 *rc_type) { struct dvb_usb_device *d = rc->priv; struct dib0700_state *st = d->priv; int new_proto, ret; if (mutex_lock_interruptible(&d->usb_mutex) < 0) { err("could not acquire lock"); return -EINTR; } st->buf[0] = REQUEST_SET_RC; st->buf[1] = 0; st->buf[2] = 0; /* Set the IR mode */ if (*rc_type & RC_BIT_RC5) { new_proto = 1; *rc_type = RC_BIT_RC5; } else if (*rc_type & RC_BIT_NEC) { new_proto = 0; *rc_type = RC_BIT_NEC; } else if (*rc_type & RC_BIT_RC6_MCE) { if (st->fw_version < 0x10200) { ret = -EINVAL; goto out; } new_proto = 2; *rc_type = RC_BIT_RC6_MCE; } else { ret = -EINVAL; goto out; } st->buf[1] = new_proto; ret = dib0700_ctrl_wr(d, st->buf, 3); if (ret < 0) { err("ir protocol setup failed"); goto out; } d->props.rc.core.protocol = *rc_type; out: mutex_unlock(&d->usb_mutex); return ret; } /* This is 
the structure of the RC response packet starting in firmware 1.20 */ struct dib0700_rc_response { u8 report_id; u8 data_state; union { struct { u8 system; u8 not_system; u8 data; u8 not_data; } nec; struct { u8 not_used; u8 system; u8 data; u8 not_data; } rc5; }; }; #define RC_MSG_SIZE_V1_20 6 static void dib0700_rc_urb_completion(struct urb *purb) { struct dvb_usb_device *d = purb->context; struct dib0700_rc_response *poll_reply; enum rc_type protocol; u32 uninitialized_var(keycode); u8 toggle; deb_info("%s()\n", __func__); if (d->rc_dev == NULL) { /* This will occur if disable_rc_polling=1 */ kfree(purb->transfer_buffer); usb_free_urb(purb); return; } poll_reply = purb->transfer_buffer; if (purb->status < 0) { deb_info("discontinuing polling\n"); kfree(purb->transfer_buffer); usb_free_urb(purb); return; } if (purb->actual_length != RC_MSG_SIZE_V1_20) { deb_info("malformed rc msg size=%d\n", purb->actual_length); goto resubmit; } deb_data("IR ID = %02X state = %02X System = %02X %02X Cmd = %02X %02X (len %d)\n", poll_reply->report_id, poll_reply->data_state, poll_reply->nec.system, poll_reply->nec.not_system, poll_reply->nec.data, poll_reply->nec.not_data, purb->actual_length); switch (d->props.rc.core.protocol) { case RC_BIT_NEC: protocol = RC_TYPE_NEC; toggle = 0; /* NEC protocol sends repeat code as 0 0 0 FF */ if (poll_reply->nec.system == 0x00 && poll_reply->nec.not_system == 0x00 && poll_reply->nec.data == 0x00 && poll_reply->nec.not_data == 0xff) { poll_reply->data_state = 2; break; } if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) { deb_data("NEC32 protocol\n"); keycode = RC_SCANCODE_NEC32(poll_reply->nec.system << 24 | poll_reply->nec.not_system << 16 | poll_reply->nec.data << 8 | poll_reply->nec.not_data); } else if ((poll_reply->nec.system ^ poll_reply->nec.not_system) != 0xff) { deb_data("NEC extended protocol\n"); keycode = RC_SCANCODE_NECX(poll_reply->nec.system << 8 | poll_reply->nec.not_system, poll_reply->nec.data); } else { 
deb_data("NEC normal protocol\n"); keycode = RC_SCANCODE_NEC(poll_reply->nec.system, poll_reply->nec.data); } break; default: deb_data("RC5 protocol\n"); protocol = RC_TYPE_RC5; toggle = poll_reply->report_id; keycode = RC_SCANCODE_RC5(poll_reply->rc5.system, poll_reply->rc5.data); if ((poll_reply->rc5.data ^ poll_reply->rc5.not_data) != 0xff) { /* Key failed integrity check */ err("key failed integrity check: %02x %02x %02x %02x", poll_reply->rc5.not_used, poll_reply->rc5.system, poll_reply->rc5.data, poll_reply->rc5.not_data); goto resubmit; } break; } rc_keydown(d->rc_dev, protocol, keycode, toggle); resubmit: /* Clean the buffer before we requeue */ memset(purb->transfer_buffer, 0, RC_MSG_SIZE_V1_20); /* Requeue URB */ usb_submit_urb(purb, GFP_ATOMIC); } int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf) { struct dib0700_state *st = d->priv; struct urb *purb; const struct usb_endpoint_descriptor *e; int ret, rc_ep = 1; unsigned int pipe = 0; /* Poll-based. Don't initialize bulk mode */ if (st->fw_version < 0x10200 || !intf) return 0; /* Starting in firmware 1.20, the RC info is provided on a bulk pipe */ purb = usb_alloc_urb(0, GFP_KERNEL); if (purb == NULL) { err("rc usb alloc urb failed"); return -ENOMEM; } purb->transfer_buffer = kzalloc(RC_MSG_SIZE_V1_20, GFP_KERNEL); if (purb->transfer_buffer == NULL) { err("rc kzalloc failed"); usb_free_urb(purb); return -ENOMEM; } purb->status = -EINPROGRESS; /* * Some devices like the Hauppauge NovaTD model 52009 use an interrupt * endpoint, while others use a bulk one. 
*/ e = &intf->altsetting[0].endpoint[rc_ep].desc; if (usb_endpoint_dir_in(e)) { if (usb_endpoint_xfer_bulk(e)) { pipe = usb_rcvbulkpipe(d->udev, rc_ep); usb_fill_bulk_urb(purb, d->udev, pipe, purb->transfer_buffer, RC_MSG_SIZE_V1_20, dib0700_rc_urb_completion, d); } else if (usb_endpoint_xfer_int(e)) { pipe = usb_rcvintpipe(d->udev, rc_ep); usb_fill_int_urb(purb, d->udev, pipe, purb->transfer_buffer, RC_MSG_SIZE_V1_20, dib0700_rc_urb_completion, d, 1); } } if (!pipe) { err("There's no endpoint for remote controller"); kfree(purb->transfer_buffer); usb_free_urb(purb); return 0; } ret = usb_submit_urb(purb, GFP_ATOMIC); if (ret) { err("rc submit urb failed"); kfree(purb->transfer_buffer); usb_free_urb(purb); } return ret; } static int dib0700_probe(struct usb_interface *intf, const struct usb_device_id *id) { int i; struct dvb_usb_device *dev; for (i = 0; i < dib0700_device_count; i++) if (dvb_usb_device_init(intf, &dib0700_devices[i], THIS_MODULE, &dev, adapter_nr) == 0) { struct dib0700_state *st = dev->priv; u32 hwversion, romversion, fw_version, fwtype; dib0700_get_version(dev, &hwversion, &romversion, &fw_version, &fwtype); deb_info("Firmware version: %x, %d, 0x%x, %d\n", hwversion, romversion, fw_version, fwtype); st->fw_version = fw_version; st->nb_packet_buffer_size = (u32)nb_packet_buffer_size; /* Disable polling mode on newer firmwares */ if (st->fw_version >= 0x10200) dev->props.rc.core.bulk_mode = true; else dev->props.rc.core.bulk_mode = false; dib0700_rc_setup(dev, intf); return 0; } return -ENODEV; } static struct usb_driver dib0700_driver = { .name = "dvb_usb_dib0700", .probe = dib0700_probe, .disconnect = dvb_usb_device_exit, .id_table = dib0700_usb_id_table, }; module_usb_driver(dib0700_driver); MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw"); MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
xcaliburinhand/I9000-Reoriented-for-I897-Ginger
drivers/hid/hid-a4tech.c
928
3635
/*
 *  HID driver for some a4tech "special" devices
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 *  Copyright (c) 2006-2007 Jiri Kosina
 *  Copyright (c) 2007 Paul Walmsley
 *  Copyright (c) 2008 Jiri Slaby
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hid-ids.h"

#define A4_2WHEEL_MOUSE_HACK_7	0x01
#define A4_2WHEEL_MOUSE_HACK_B8	0x02

/* Per-device state: active quirk mask and the deferred-wheel bookkeeping. */
struct a4tech_sc {
	unsigned long quirks;
	unsigned int hw_wheel;
	__s32 delayed_value;
};

/*
 * Make sure a horizontal-wheel capability is advertised whenever the
 * vertical wheel is mapped, and hide usage 0x00090007 for HACK_7 devices
 * (it is handled in a4_event() instead).
 */
static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	struct a4tech_sc *sc = hid_get_drvdata(hdev);

	if (usage->type == EV_REL && usage->code == REL_WHEEL)
		set_bit(REL_HWHEEL, *bit);

	return ((sc->quirks & A4_2WHEEL_MOUSE_HACK_7) &&
		usage->hid == 0x00090007) ? -1 : 0;
}

/*
 * Rewrite raw wheel events into vertical/horizontal wheel events,
 * depending on which quirk the device carries. Returns 1 when the
 * event was consumed here, 0 to let hid-core process it normally.
 */
static int a4_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	struct a4tech_sc *sc = hid_get_drvdata(hdev);
	struct input_dev *idev;

	/* Nothing to translate until the input device is claimed. */
	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
			!usage->type)
		return 0;

	idev = field->hidinput->input;

	if (sc->quirks & A4_2WHEEL_MOUSE_HACK_B8) {
		/* Hold the wheel delta until the direction flag arrives. */
		if (usage->type == EV_REL && usage->code == REL_WHEEL) {
			sc->delayed_value = value;
			return 1;
		}

		/* Direction flag: emit the stashed delta on the right axis. */
		if (usage->hid == 0x000100b8) {
			input_event(idev, EV_REL,
					value ? REL_HWHEEL : REL_WHEEL,
					sc->delayed_value);
			return 1;
		}
	}

	/* HACK_7: usage 0x00090007 toggles wheel-axis redirection. */
	if ((sc->quirks & A4_2WHEEL_MOUSE_HACK_7) &&
			usage->hid == 0x00090007) {
		sc->hw_wheel = !!value;
		return 1;
	}

	/* While redirection is active, wheel deltas become horizontal. */
	if (usage->code == REL_WHEEL && sc->hw_wheel) {
		input_event(idev, usage->type, REL_HWHEEL, value);
		return 1;
	}

	return 0;
}

/* Allocate per-device state, then run the standard parse/start sequence. */
static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	struct a4tech_sc *sc;
	int ret;

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (sc == NULL) {
		dev_err(&hdev->dev, "can't alloc device descriptor\n");
		ret = -ENOMEM;
		goto err_free;
	}

	sc->quirks = id->driver_data;
	hid_set_drvdata(hdev, sc);

	ret = hid_parse(hdev);
	if (ret) {
		dev_err(&hdev->dev, "parse failed\n");
		goto err_free;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		dev_err(&hdev->dev, "hw start failed\n");
		goto err_free;
	}

	return 0;
err_free:
	kfree(sc);
	return ret;
}

/* Stop the hardware and release the per-device state. */
static void a4_remove(struct hid_device *hdev)
{
	struct a4tech_sc *sc = hid_get_drvdata(hdev);

	hid_hw_stop(hdev);
	kfree(sc);
}

static const struct hid_device_id a4_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU),
		.driver_data = A4_2WHEEL_MOUSE_HACK_7 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D),
		.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
	{ }
};
MODULE_DEVICE_TABLE(hid, a4_devices);

static struct hid_driver a4_driver = {
	.name = "a4tech",
	.id_table = a4_devices,
	.input_mapped = a4_input_mapped,
	.event = a4_event,
	.probe = a4_probe,
	.remove = a4_remove,
};

static int __init a4_init(void)
{
	return hid_register_driver(&a4_driver);
}

static void __exit a4_exit(void)
{
	hid_unregister_driver(&a4_driver);
}

module_init(a4_init);
module_exit(a4_exit);
MODULE_LICENSE("GPL");
gpl-2.0
yoAeroA00/android_kernel_nokia_msm8610
kernel/debug/kdb/kdb_io.c
1440
20930
/* * Kernel Debugger Architecture Independent Console I/O handler * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved. * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved. */ #include <linux/module.h> #include <linux/types.h> #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/kdev_t.h> #include <linux/console.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/nmi.h> #include <linux/delay.h> #include <linux/kgdb.h> #include <linux/kdb.h> #include <linux/kallsyms.h> #include "kdb_private.h" #define CMD_BUFLEN 256 char kdb_prompt_str[CMD_BUFLEN]; int kdb_trap_printk; static int kgdb_transition_check(char *buffer) { if (buffer[0] != '+' && buffer[0] != '$') { KDB_STATE_SET(KGDB_TRANS); kdb_printf("%s", buffer); } else { int slen = strlen(buffer); if (slen > 3 && buffer[slen - 3] == '#') { kdb_gdb_state_pass(buffer); strcpy(buffer, "kgdb"); KDB_STATE_SET(DOING_KGDB); return 1; } } return 0; } static int kdb_read_get_key(char *buffer, size_t bufsize) { #define ESCAPE_UDELAY 1000 #define ESCAPE_DELAY (2*1000000/ESCAPE_UDELAY) /* 2 seconds worth of udelays */ char escape_data[5]; /* longest vt100 escape sequence is 4 bytes */ char *ped = escape_data; int escape_delay = 0; get_char_func *f, *f_escape = NULL; int key; for (f = &kdb_poll_funcs[0]; ; ++f) { if (*f == NULL) { /* Reset NMI watchdog once per poll loop */ touch_nmi_watchdog(); f = &kdb_poll_funcs[0]; } if (escape_delay == 2) { *ped = '\0'; ped = escape_data; --escape_delay; } if (escape_delay == 1) { key = *ped++; if (!*ped) --escape_delay; break; } key = (*f)(); if (key == -1) { if (escape_delay) { udelay(ESCAPE_UDELAY); --escape_delay; } continue; } if (bufsize <= 2) { if (key == '\r') key = '\n'; *buffer++ = key; *buffer = '\0'; 
return -1; } if (escape_delay == 0 && key == '\e') { escape_delay = ESCAPE_DELAY; ped = escape_data; f_escape = f; } if (escape_delay) { *ped++ = key; if (f_escape != f) { escape_delay = 2; continue; } if (ped - escape_data == 1) { /* \e */ continue; } else if (ped - escape_data == 2) { /* \e<something> */ if (key != '[') escape_delay = 2; continue; } else if (ped - escape_data == 3) { /* \e[<something> */ int mapkey = 0; switch (key) { case 'A': /* \e[A, up arrow */ mapkey = 16; break; case 'B': /* \e[B, down arrow */ mapkey = 14; break; case 'C': /* \e[C, right arrow */ mapkey = 6; break; case 'D': /* \e[D, left arrow */ mapkey = 2; break; case '1': /* dropthrough */ case '3': /* dropthrough */ /* \e[<1,3,4>], may be home, del, end */ case '4': mapkey = -1; break; } if (mapkey != -1) { if (mapkey > 0) { escape_data[0] = mapkey; escape_data[1] = '\0'; } escape_delay = 2; } continue; } else if (ped - escape_data == 4) { /* \e[<1,3,4><something> */ int mapkey = 0; if (key == '~') { switch (escape_data[2]) { case '1': /* \e[1~, home */ mapkey = 1; break; case '3': /* \e[3~, del */ mapkey = 4; break; case '4': /* \e[4~, end */ mapkey = 5; break; } } if (mapkey > 0) { escape_data[0] = mapkey; escape_data[1] = '\0'; } escape_delay = 2; continue; } } break; /* A key to process */ } return key; } /* * kdb_read * * This function reads a string of characters, terminated by * a newline, or by reaching the end of the supplied buffer, * from the current kernel debugger console device. * Parameters: * buffer - Address of character buffer to receive input characters. * bufsize - size, in bytes, of the character buffer * Returns: * Returns a pointer to the buffer containing the received * character string. This string will be terminated by a * newline character. * Locking: * No locks are required to be held upon entry to this * function. It is not reentrant - it relies on the fact * that while kdb is running on only one "master debug" cpu. 
* Remarks: * * The buffer size must be >= 2. A buffer size of 2 means that the caller only * wants a single key. * * An escape key could be the start of a vt100 control sequence such as \e[D * (left arrow) or it could be a character in its own right. The standard * method for detecting the difference is to wait for 2 seconds to see if there * are any other characters. kdb is complicated by the lack of a timer service * (interrupts are off), by multiple input sources and by the need to sometimes * return after just one key. Escape sequence processing has to be done as * states in the polling loop. */ static char *kdb_read(char *buffer, size_t bufsize) { char *cp = buffer; char *bufend = buffer+bufsize-2; /* Reserve space for newline * and null byte */ char *lastchar; char *p_tmp; char tmp; static char tmpbuffer[CMD_BUFLEN]; int len = strlen(buffer); int len_tmp; int tab = 0; int count; int i; int diag, dtab_count; int key; static int last_crlf; diag = kdbgetintenv("DTABCOUNT", &dtab_count); if (diag) dtab_count = 30; if (len > 0) { cp += len; if (*(buffer+len-1) == '\n') cp--; } lastchar = cp; *cp = '\0'; kdb_printf("%s", buffer); poll_again: key = kdb_read_get_key(buffer, bufsize); if (key == -1) return buffer; if (key != 9) tab = 0; if (key != 10 && key != 13) last_crlf = 0; switch (key) { case 8: /* backspace */ if (cp > buffer) { if (cp < lastchar) { memcpy(tmpbuffer, cp, lastchar - cp); memcpy(cp-1, tmpbuffer, lastchar - cp); } *(--lastchar) = '\0'; --cp; kdb_printf("\b%s \r", cp); tmp = *cp; *cp = '\0'; kdb_printf(kdb_prompt_str); kdb_printf("%s", buffer); *cp = tmp; } break; case 10: /* new line */ case 13: /* carriage return */ /* handle \n after \r */ if (last_crlf && last_crlf != key) break; last_crlf = key; *lastchar++ = '\n'; *lastchar++ = '\0'; if (!KDB_STATE(KGDB_TRANS)) { KDB_STATE_SET(KGDB_TRANS); kdb_printf("%s", buffer); } kdb_printf("\n"); return buffer; case 4: /* Del */ if (cp < lastchar) { memcpy(tmpbuffer, cp+1, lastchar - cp - 1); memcpy(cp, 
tmpbuffer, lastchar - cp - 1); *(--lastchar) = '\0'; kdb_printf("%s \r", cp); tmp = *cp; *cp = '\0'; kdb_printf(kdb_prompt_str); kdb_printf("%s", buffer); *cp = tmp; } break; case 1: /* Home */ if (cp > buffer) { kdb_printf("\r"); kdb_printf(kdb_prompt_str); cp = buffer; } break; case 5: /* End */ if (cp < lastchar) { kdb_printf("%s", cp); cp = lastchar; } break; case 2: /* Left */ if (cp > buffer) { kdb_printf("\b"); --cp; } break; case 14: /* Down */ memset(tmpbuffer, ' ', strlen(kdb_prompt_str) + (lastchar-buffer)); *(tmpbuffer+strlen(kdb_prompt_str) + (lastchar-buffer)) = '\0'; kdb_printf("\r%s\r", tmpbuffer); *lastchar = (char)key; *(lastchar+1) = '\0'; return lastchar; case 6: /* Right */ if (cp < lastchar) { kdb_printf("%c", *cp); ++cp; } break; case 16: /* Up */ memset(tmpbuffer, ' ', strlen(kdb_prompt_str) + (lastchar-buffer)); *(tmpbuffer+strlen(kdb_prompt_str) + (lastchar-buffer)) = '\0'; kdb_printf("\r%s\r", tmpbuffer); *lastchar = (char)key; *(lastchar+1) = '\0'; return lastchar; case 9: /* Tab */ if (tab < 2) ++tab; p_tmp = buffer; while (*p_tmp == ' ') p_tmp++; if (p_tmp > cp) break; memcpy(tmpbuffer, p_tmp, cp-p_tmp); *(tmpbuffer + (cp-p_tmp)) = '\0'; p_tmp = strrchr(tmpbuffer, ' '); if (p_tmp) ++p_tmp; else p_tmp = tmpbuffer; len = strlen(p_tmp); count = kallsyms_symbol_complete(p_tmp, sizeof(tmpbuffer) - (p_tmp - tmpbuffer)); if (tab == 2 && count > 0) { kdb_printf("\n%d symbols are found.", count); if (count > dtab_count) { count = dtab_count; kdb_printf(" But only first %d symbols will" " be printed.\nYou can change the" " environment variable DTABCOUNT.", count); } kdb_printf("\n"); for (i = 0; i < count; i++) { if (kallsyms_symbol_next(p_tmp, i) < 0) break; kdb_printf("%s ", p_tmp); *(p_tmp + len) = '\0'; } if (i >= dtab_count) kdb_printf("..."); kdb_printf("\n"); kdb_printf(kdb_prompt_str); kdb_printf("%s", buffer); } else if (tab != 2 && count > 0) { len_tmp = strlen(p_tmp); strncpy(p_tmp+len_tmp, cp, lastchar-cp+1); len_tmp = strlen(p_tmp); 
strncpy(cp, p_tmp+len, len_tmp-len + 1); len = len_tmp - len; kdb_printf("%s", cp); cp += len; lastchar += len; } kdb_nextline = 1; /* reset output line number */ break; default: if (key >= 32 && lastchar < bufend) { if (cp < lastchar) { memcpy(tmpbuffer, cp, lastchar - cp); memcpy(cp+1, tmpbuffer, lastchar - cp); *++lastchar = '\0'; *cp = key; kdb_printf("%s\r", cp); ++cp; tmp = *cp; *cp = '\0'; kdb_printf(kdb_prompt_str); kdb_printf("%s", buffer); *cp = tmp; } else { *++lastchar = '\0'; *cp++ = key; /* The kgdb transition check will hide * printed characters if we think that * kgdb is connecting, until the check * fails */ if (!KDB_STATE(KGDB_TRANS)) { if (kgdb_transition_check(buffer)) return buffer; } else { kdb_printf("%c", key); } } /* Special escape to kgdb */ if (lastchar - buffer >= 5 && strcmp(lastchar - 5, "$?#3f") == 0) { kdb_gdb_state_pass(lastchar - 5); strcpy(buffer, "kgdb"); KDB_STATE_SET(DOING_KGDB); return buffer; } if (lastchar - buffer >= 11 && strcmp(lastchar - 11, "$qSupported") == 0) { kdb_gdb_state_pass(lastchar - 11); strcpy(buffer, "kgdb"); KDB_STATE_SET(DOING_KGDB); return buffer; } } break; } goto poll_again; } /* * kdb_getstr * * Print the prompt string and read a command from the * input device. * * Parameters: * buffer Address of buffer to receive command * bufsize Size of buffer in bytes * prompt Pointer to string to use as prompt string * Returns: * Pointer to command buffer. * Locking: * None. * Remarks: * For SMP kernels, the processor number will be * substituted for %d, %x or %o in the prompt. */ char *kdb_getstr(char *buffer, size_t bufsize, char *prompt) { if (prompt && kdb_prompt_str != prompt) strncpy(kdb_prompt_str, prompt, CMD_BUFLEN); kdb_printf(kdb_prompt_str); kdb_nextline = 1; /* Prompt and input resets line number */ return kdb_read(buffer, bufsize); } /* * kdb_input_flush * * Get rid of any buffered console input. 
* * Parameters: * none * Returns: * nothing * Locking: * none * Remarks: * Call this function whenever you want to flush input. If there is any * outstanding input, it ignores all characters until there has been no * data for approximately 1ms. */ static void kdb_input_flush(void) { get_char_func *f; int res; int flush_delay = 1; while (flush_delay) { flush_delay--; empty: touch_nmi_watchdog(); for (f = &kdb_poll_funcs[0]; *f; ++f) { res = (*f)(); if (res != -1) { flush_delay = 1; goto empty; } } if (flush_delay) mdelay(1); } } /* * kdb_printf * * Print a string to the output device(s). * * Parameters: * printf-like format and optional args. * Returns: * 0 * Locking: * None. * Remarks: * use 'kdbcons->write()' to avoid polluting 'log_buf' with * kdb output. * * If the user is doing a cmd args | grep srch * then kdb_grepping_flag is set. * In that case we need to accumulate full lines (ending in \n) before * searching for the pattern. */ static char kdb_buffer[256]; /* A bit too big to go on stack */ static char *next_avail = kdb_buffer; static int size_avail; static int suspend_grep; /* * search arg1 to see if it contains arg2 * (kdmain.c provides flags for ^pat and pat$) * * return 1 for found, 0 for not found */ static int kdb_search_string(char *searched, char *searchfor) { char firstchar, *cp; int len1, len2; /* not counting the newline at the end of "searched" */ len1 = strlen(searched)-1; len2 = strlen(searchfor); if (len1 < len2) return 0; if (kdb_grep_leading && kdb_grep_trailing && len1 != len2) return 0; if (kdb_grep_leading) { if (!strncmp(searched, searchfor, len2)) return 1; } else if (kdb_grep_trailing) { if (!strncmp(searched+len1-len2, searchfor, len2)) return 1; } else { firstchar = *searchfor; cp = searched; while ((cp = strchr(cp, firstchar))) { if (!strncmp(cp, searchfor, len2)) return 1; cp++; } } return 0; } int vkdb_printf(const char *fmt, va_list ap) { int diag; int linecount; int colcount; int logging, saved_loglevel = 0; int 
saved_trap_printk; int got_printf_lock = 0; int retlen = 0; int fnd, len; char *cp, *cp2, *cphold = NULL, replaced_byte = ' '; char *moreprompt = "more> "; struct console *c = console_drivers; static DEFINE_SPINLOCK(kdb_printf_lock); unsigned long uninitialized_var(flags); preempt_disable(); saved_trap_printk = kdb_trap_printk; kdb_trap_printk = 0; /* Serialize kdb_printf if multiple cpus try to write at once. * But if any cpu goes recursive in kdb, just print the output, * even if it is interleaved with any other text. */ if (!KDB_STATE(PRINTF_LOCK)) { KDB_STATE_SET(PRINTF_LOCK); spin_lock_irqsave(&kdb_printf_lock, flags); got_printf_lock = 1; atomic_inc(&kdb_event); } else { __acquire(kdb_printf_lock); } diag = kdbgetintenv("LINES", &linecount); if (diag || linecount <= 1) linecount = 24; diag = kdbgetintenv("COLUMNS", &colcount); if (diag || colcount <= 1) colcount = 80; diag = kdbgetintenv("LOGGING", &logging); if (diag) logging = 0; if (!kdb_grepping_flag || suspend_grep) { /* normally, every vsnprintf starts a new buffer */ next_avail = kdb_buffer; size_avail = sizeof(kdb_buffer); } vsnprintf(next_avail, size_avail, fmt, ap); /* * If kdb_parse() found that the command was cmd xxx | grep yyy * then kdb_grepping_flag is set, and kdb_grep_string contains yyy * * Accumulate the print data up to a newline before searching it. * (vsnprintf does null-terminate the string that it generates) */ /* skip the search if prints are temporarily unconditional */ if (!suspend_grep && kdb_grepping_flag) { cp = strchr(kdb_buffer, '\n'); if (!cp) { /* * Special cases that don't end with newlines * but should be written without one: * The "[nn]kdb> " prompt should * appear at the front of the buffer. * * The "[nn]more " prompt should also be * (MOREPROMPT -> moreprompt) * written * but we print that ourselves, * we set the suspend_grep flag to make * it unconditional. 
* */ if (next_avail == kdb_buffer) { /* * these should occur after a newline, * so they will be at the front of the * buffer */ cp2 = kdb_buffer; len = strlen(kdb_prompt_str); if (!strncmp(cp2, kdb_prompt_str, len)) { /* * We're about to start a new * command, so we can go back * to normal mode. */ kdb_grepping_flag = 0; goto kdb_printit; } } /* no newline; don't search/write the buffer until one is there */ len = strlen(kdb_buffer); next_avail = kdb_buffer + len; size_avail = sizeof(kdb_buffer) - len; goto kdb_print_out; } /* * The newline is present; print through it or discard * it, depending on the results of the search. */ cp++; /* to byte after the newline */ replaced_byte = *cp; /* remember what/where it was */ cphold = cp; *cp = '\0'; /* end the string for our search */ /* * We now have a newline at the end of the string * Only continue with this output if it contains the * search string. */ fnd = kdb_search_string(kdb_buffer, kdb_grep_string); if (!fnd) { /* * At this point the complete line at the start * of kdb_buffer can be discarded, as it does * not contain what the user is looking for. * Shift the buffer left. */ *cphold = replaced_byte; strcpy(kdb_buffer, cphold); len = strlen(kdb_buffer); next_avail = kdb_buffer + len; size_avail = sizeof(kdb_buffer) - len; goto kdb_print_out; } /* * at this point the string is a full line and * should be printed, up to the null. */ } kdb_printit: /* * Write to all consoles. 
*/ retlen = strlen(kdb_buffer); if (!dbg_kdb_mode && kgdb_connected) { gdbstub_msg_write(kdb_buffer, retlen); } else { if (dbg_io_ops && !dbg_io_ops->is_console) { len = retlen; cp = kdb_buffer; while (len--) { dbg_io_ops->write_char(*cp); cp++; } } while (c) { c->write(c, kdb_buffer, retlen); touch_nmi_watchdog(); c = c->next; } } if (logging) { saved_loglevel = console_loglevel; console_loglevel = 0; printk(KERN_INFO "%s", kdb_buffer); } if (KDB_STATE(PAGER)) { /* * Check printed string to decide how to bump the * kdb_nextline to control when the more prompt should * show up. */ int got = 0; len = retlen; while (len--) { if (kdb_buffer[len] == '\n') { kdb_nextline++; got = 0; } else if (kdb_buffer[len] == '\r') { got = 0; } else { got++; } } kdb_nextline += got / (colcount + 1); } /* check for having reached the LINES number of printed lines */ if (kdb_nextline >= linecount) { char buf1[16] = ""; #if defined(CONFIG_SMP) char buf2[32]; #endif /* Watch out for recursion here. Any routine that calls * kdb_printf will come back through here. And kdb_read * uses kdb_printf to echo on serial consoles ... */ kdb_nextline = 1; /* In case of recursion */ /* * Pause until cr. */ moreprompt = kdbgetenv("MOREPROMPT"); if (moreprompt == NULL) moreprompt = "more> "; #if defined(CONFIG_SMP) if (strchr(moreprompt, '%')) { sprintf(buf2, moreprompt, get_cpu()); put_cpu(); moreprompt = buf2; } #endif kdb_input_flush(); c = console_drivers; if (dbg_io_ops && !dbg_io_ops->is_console) { len = strlen(moreprompt); cp = moreprompt; while (len--) { dbg_io_ops->write_char(*cp); cp++; } } while (c) { c->write(c, moreprompt, strlen(moreprompt)); touch_nmi_watchdog(); c = c->next; } if (logging) printk("%s", moreprompt); kdb_read(buf1, 2); /* '2' indicates to return * immediately after getting one key. 
*/ kdb_nextline = 1; /* Really set output line 1 */ /* empty and reset the buffer: */ kdb_buffer[0] = '\0'; next_avail = kdb_buffer; size_avail = sizeof(kdb_buffer); if ((buf1[0] == 'q') || (buf1[0] == 'Q')) { /* user hit q or Q */ KDB_FLAG_SET(CMD_INTERRUPT); /* command interrupted */ KDB_STATE_CLEAR(PAGER); /* end of command output; back to normal mode */ kdb_grepping_flag = 0; kdb_printf("\n"); } else if (buf1[0] == ' ') { kdb_printf("\r"); suspend_grep = 1; /* for this recursion */ } else if (buf1[0] == '\n') { kdb_nextline = linecount - 1; kdb_printf("\r"); suspend_grep = 1; /* for this recursion */ } else if (buf1[0] && buf1[0] != '\n') { /* user hit something other than enter */ suspend_grep = 1; /* for this recursion */ kdb_printf("\nOnly 'q' or 'Q' are processed at more " "prompt, input ignored\n"); } else if (kdb_grepping_flag) { /* user hit enter */ suspend_grep = 1; /* for this recursion */ kdb_printf("\n"); } kdb_input_flush(); } /* * For grep searches, shift the printed string left. * replaced_byte contains the character that was overwritten with * the terminating null, and cphold points to the null. * Then adjust the notion of available space in the buffer. */ if (kdb_grepping_flag && !suspend_grep) { *cphold = replaced_byte; strcpy(kdb_buffer, cphold); len = strlen(kdb_buffer); next_avail = kdb_buffer + len; size_avail = sizeof(kdb_buffer) - len; } kdb_print_out: suspend_grep = 0; /* end of what may have been a recursive call */ if (logging) console_loglevel = saved_loglevel; if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) { got_printf_lock = 0; spin_unlock_irqrestore(&kdb_printf_lock, flags); KDB_STATE_CLEAR(PRINTF_LOCK); atomic_dec(&kdb_event); } else { __release(kdb_printf_lock); } kdb_trap_printk = saved_trap_printk; preempt_enable(); return retlen; } int kdb_printf(const char *fmt, ...) { va_list ap; int r; va_start(ap, fmt); r = vkdb_printf(fmt, ap); va_end(ap); return r; } EXPORT_SYMBOL_GPL(kdb_printf);
gpl-2.0
Tesla-Redux-Devices/android_kernel_mediatek_sprout
arch/arm/mach-tegra/hotplug.c
1952
1322
/* * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * Copyright (c) 2010, 2012-2013, NVIDIA Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/smp.h> #include <linux/clk/tegra.h> #include <asm/smp_plat.h> #include "fuse.h" #include "sleep.h" static void (*tegra_hotplug_shutdown)(void); int tegra_cpu_kill(unsigned cpu) { cpu = cpu_logical_map(cpu); /* Clock gate the CPU */ tegra_wait_cpu_in_reset(cpu); tegra_disable_cpu_clock(cpu); return 1; } /* * platform-specific code to shutdown a CPU * * Called with IRQs disabled */ void __ref tegra_cpu_die(unsigned int cpu) { /* Clean L1 data cache */ tegra_disable_clean_inv_dcache(); /* Shut down the current CPU. */ tegra_hotplug_shutdown(); /* Should never return here. */ BUG(); } void __init tegra_hotplug_init(void) { if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) return; if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_chip_id == TEGRA20) tegra_hotplug_shutdown = tegra20_hotplug_shutdown; if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) && tegra_chip_id == TEGRA30) tegra_hotplug_shutdown = tegra30_hotplug_shutdown; }
gpl-2.0
ZeroInfinityXDA/OQC-m9
arch/arc/kernel/kgdb.c
1952
5119
/*
 * kgdb support for ARC
 *
 * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kgdb.h>
#include <linux/sched.h>
#include <asm/disasm.h>
#include <asm/cacheflush.h>

/* Fill gdb's register image from kernel exception + callee-saved state. */
static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
			struct callee_regs *cregs)
{
	int i;

	for (i = 0; i <= 26; i++)
		gdb_regs[_R0 + i] = get_reg(i, kernel_regs, cregs);

	/* remaining slots in gdb's layout have no kernel counterpart */
	for (i = 27; i < GDB_MAX_REGS; i++)
		gdb_regs[i] = 0;

	gdb_regs[_FP]		= kernel_regs->fp;
	gdb_regs[__SP]		= kernel_regs->sp;
	gdb_regs[_BLINK]	= kernel_regs->blink;
	gdb_regs[_RET]		= kernel_regs->ret;
	gdb_regs[_STATUS32]	= kernel_regs->status32;
	gdb_regs[_LP_COUNT]	= kernel_regs->lp_count;
	gdb_regs[_LP_END]	= kernel_regs->lp_end;
	gdb_regs[_LP_START]	= kernel_regs->lp_start;
	gdb_regs[_BTA]		= kernel_regs->bta;
	gdb_regs[_STOP_PC]	= kernel_regs->ret;
}

/* Write gdb's register image back into kernel exception state. */
static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
			  struct callee_regs *cregs)
{
	int i;

	for (i = 0; i <= 26; i++)
		set_reg(i, gdb_regs[i + _R0], kernel_regs, cregs);

	kernel_regs->fp		= gdb_regs[_FP];
	kernel_regs->sp		= gdb_regs[__SP];
	kernel_regs->blink	= gdb_regs[_BLINK];
	kernel_regs->ret	= gdb_regs[_RET];
	kernel_regs->status32	= gdb_regs[_STATUS32];
	kernel_regs->lp_count	= gdb_regs[_LP_COUNT];
	kernel_regs->lp_end	= gdb_regs[_LP_END];
	kernel_regs->lp_start	= gdb_regs[_LP_START];
	kernel_regs->bta	= gdb_regs[_BTA];
}

void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
{
	to_gdb_regs(gdb_regs, kernel_regs,
		    (struct callee_regs *) current->thread.callee_reg);
}

void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
{
	from_gdb_regs(gdb_regs, kernel_regs,
		      (struct callee_regs *) current->thread.callee_reg);
}

/* Register snapshot of a task that is not currently running. */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
				 struct task_struct *task)
{
	if (task)
		to_gdb_regs(gdb_regs, task_pt_regs(task),
			    (struct callee_regs *) task->thread.callee_reg);
}

/*
 * State for software single-stepping: the original instruction bytes and
 * addresses of the (up to two, if on a branch) temporary breakpoints.
 */
struct single_step_data_t {
	uint16_t opcode[2];
	unsigned long address[2];
	int is_branch;
	int armed;
} single_step_data;

/* Restore the instructions overwritten by do_single_step(), if armed. */
static void undo_single_step(struct pt_regs *regs)
{
	int i, nr;

	if (!single_step_data.armed)
		return;

	nr = single_step_data.is_branch ? 2 : 1;
	for (i = 0; i < nr; i++) {
		memcpy((void *) single_step_data.address[i],
		       &single_step_data.opcode[i],
		       BREAK_INSTR_SIZE);

		flush_icache_range(single_step_data.address[i],
				   single_step_data.address[i] +
				   BREAK_INSTR_SIZE);
	}
	single_step_data.armed = 0;
}

/* Save the instruction at @address into @save and plant a breakpoint there. */
static void place_trap(unsigned long address, void *save)
{
	memcpy(save, (void *) address, BREAK_INSTR_SIZE);
	memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
	       BREAK_INSTR_SIZE);
	flush_icache_range(address, address + BREAK_INSTR_SIZE);
}

/*
 * Arm single-stepping by planting breakpoints at the next pc (and, for a
 * branch, at both possible successors as computed by the disassembler).
 */
static void do_single_step(struct pt_regs *regs)
{
	single_step_data.is_branch = disasm_next_pc((unsigned long)
		regs->ret, regs, (struct callee_regs *)
		current->thread.callee_reg,
		&single_step_data.address[0],
		&single_step_data.address[1]);

	place_trap(single_step_data.address[0], &single_step_data.opcode[0]);

	if (single_step_data.is_branch)
		place_trap(single_step_data.address[1],
			   &single_step_data.opcode[1]);

	single_step_data.armed++;
}

/*
 * Handle the continuation commands from the gdb stub: step ('s'),
 * continue ('c'), detach ('D') and kill ('k'). Returns 0 when the
 * command was consumed, -1 for anything else.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *regs)
{
	unsigned long addr;
	char *ptr;

	undo_single_step(regs);

	switch (remcomInBuffer[0]) {
	case 's':
	case 'c':
		/* optional resume address follows the command letter */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			regs->ret = addr;
		/* fallthrough */
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcomInBuffer[0] == 's') {
			do_single_step(regs);
			atomic_set(&kgdb_cpu_doing_single_step,
				   smp_processor_id());
		}

		return 0;
	}
	return -1;
}

unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int kgdb_arch_init(void)
{
	single_step_data.armed = 0;
	return 0;
}

void kgdb_trap(struct pt_regs *regs, int param)
{
	/* trap_s 3 is used for breakpoints that overwrite existing
	 * instructions, while trap_s 4 is used for compiled breakpoints.
	 *
	 * with trap_s 3 breakpoints the original instruction needs to be
	 * restored and continuation needs to start at the location of the
	 * breakpoint.
	 *
	 * with trap_s 4 (compiled) breakpoints, continuation needs to
	 * start after the breakpoint.
	 */
	if (param == 3)
		instruction_pointer(regs) -= BREAK_INSTR_SIZE;

	kgdb_handle_exception(1, SIGTRAP, 0, regs);
}

void kgdb_arch_exit(void)
{
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	instruction_pointer(regs) = ip;
}

struct kgdb_arch arch_kgdb_ops = {
	/* breakpoint instruction: TRAP_S 0x3 */
#ifdef CONFIG_CPU_BIG_ENDIAN
	.gdb_bpt_instr		= {0x78, 0x7e},
#else
	.gdb_bpt_instr		= {0x7e, 0x78},
#endif
};
gpl-2.0
jwyterlin/linux
arch/ia64/kernel/elfcore.c
2208
1665
#include <linux/elf.h> #include <linux/coredump.h> #include <linux/fs.h> #include <linux/mm.h> #include <asm/elf.h> Elf64_Half elf_core_extra_phdrs(void) { return GATE_EHDR->e_phnum; } int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) { const struct elf_phdr *const gate_phdrs = (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); int i; Elf64_Off ofs = 0; for (i = 0; i < GATE_EHDR->e_phnum; ++i) { struct elf_phdr phdr = gate_phdrs[i]; if (phdr.p_type == PT_LOAD) { phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); phdr.p_filesz = phdr.p_memsz; if (ofs == 0) { ofs = phdr.p_offset = offset; offset += phdr.p_filesz; } else { phdr.p_offset = ofs; } } else { phdr.p_offset += ofs; } phdr.p_paddr = 0; /* match other core phdrs */ if (!dump_emit(cprm, &phdr, sizeof(phdr))) return 0; } return 1; } int elf_core_write_extra_data(struct coredump_params *cprm) { const struct elf_phdr *const gate_phdrs = (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); int i; for (i = 0; i < GATE_EHDR->e_phnum; ++i) { if (gate_phdrs[i].p_type == PT_LOAD) { void *addr = (void *)gate_phdrs[i].p_vaddr; size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz); if (!dump_emit(cprm, addr, memsz)) return 0; break; } } return 1; } size_t elf_core_extra_data_size(void) { const struct elf_phdr *const gate_phdrs = (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); int i; size_t size = 0; for (i = 0; i < GATE_EHDR->e_phnum; ++i) { if (gate_phdrs[i].p_type == PT_LOAD) { size += PAGE_ALIGN(gate_phdrs[i].p_memsz); break; } } return size; }
gpl-2.0
SaberMod/android_kernel_moto_shamu
fs/notify/notification.c
2464
13696
/* * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * Basic idea behind the notification queue: An fsnotify group (like inotify) * sends the userspace notification about events asynchronously some time after * the event happened. When inotify gets an event it will need to add that * event to the group notify queue. Since a single event might need to be on * multiple group's notification queues we can't add the event directly to each * queue and instead add a small "event_holder" to each queue. This event_holder * has a pointer back to the original event. Since the majority of events are * going to end up on one, and only one, notification queue we embed one * event_holder into each event. This means we have a single allocation instead * of always needing two. If the embedded event_holder is already in use by * another group a new event_holder (from fsnotify_event_holder_cachep) will be * allocated and used. 
*/ #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/mutex.h> #include <linux/namei.h> #include <linux/path.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/fsnotify_backend.h> #include "fsnotify.h" static struct kmem_cache *fsnotify_event_cachep; static struct kmem_cache *fsnotify_event_holder_cachep; /* * This is a magic event we send when the q is too full. Since it doesn't * hold real event information we just keep one system wide and use it any time * it is needed. It's refcnt is set 1 at kernel init time and will never * get set to 0 so it will never get 'freed' */ static struct fsnotify_event *q_overflow_event; static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0); /** * fsnotify_get_cookie - return a unique cookie for use in synchronizing events. * Called from fsnotify_move, which is inlined into filesystem modules. */ u32 fsnotify_get_cookie(void) { return atomic_inc_return(&fsnotify_sync_cookie); } EXPORT_SYMBOL_GPL(fsnotify_get_cookie); /* return true if the notify queue is empty, false otherwise */ bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group) { BUG_ON(!mutex_is_locked(&group->notification_mutex)); return list_empty(&group->notification_list) ? 
true : false; } void fsnotify_get_event(struct fsnotify_event *event) { atomic_inc(&event->refcnt); } void fsnotify_put_event(struct fsnotify_event *event) { if (!event) return; if (atomic_dec_and_test(&event->refcnt)) { pr_debug("%s: event=%p\n", __func__, event); if (event->data_type == FSNOTIFY_EVENT_PATH) path_put(&event->path); BUG_ON(!list_empty(&event->private_data_list)); kfree(event->file_name); put_pid(event->tgid); kmem_cache_free(fsnotify_event_cachep, event); } } struct fsnotify_event_holder *fsnotify_alloc_event_holder(void) { return kmem_cache_alloc(fsnotify_event_holder_cachep, GFP_KERNEL); } void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder) { if (holder) kmem_cache_free(fsnotify_event_holder_cachep, holder); } /* * Find the private data that the group previously attached to this event when * the group added the event to the notification queue (fsnotify_add_notify_event) */ struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, struct fsnotify_event *event) { struct fsnotify_event_private_data *lpriv; struct fsnotify_event_private_data *priv = NULL; assert_spin_locked(&event->lock); list_for_each_entry(lpriv, &event->private_data_list, event_list) { if (lpriv->group == group) { priv = lpriv; list_del(&priv->event_list); break; } } return priv; } /* * Add an event to the group notification queue. The group can later pull this * event off the queue to deal with. If the event is successfully added to the * group's notification queue, a reference is taken on event. 
*/ struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event, struct fsnotify_event_private_data *priv, struct fsnotify_event *(*merge)(struct list_head *, struct fsnotify_event *)) { struct fsnotify_event *return_event = NULL; struct fsnotify_event_holder *holder = NULL; struct list_head *list = &group->notification_list; pr_debug("%s: group=%p event=%p priv=%p\n", __func__, group, event, priv); /* * There is one fsnotify_event_holder embedded inside each fsnotify_event. * Check if we expect to be able to use that holder. If not alloc a new * holder. * For the overflow event it's possible that something will use the in * event holder before we get the lock so we may need to jump back and * alloc a new holder, this can't happen for most events... */ if (!list_empty(&event->holder.event_list)) { alloc_holder: holder = fsnotify_alloc_event_holder(); if (!holder) return ERR_PTR(-ENOMEM); } mutex_lock(&group->notification_mutex); if (group->q_len >= group->max_events) { event = q_overflow_event; /* * we need to return the overflow event * which means we need a ref */ fsnotify_get_event(event); return_event = event; /* sorry, no private data on the overflow event */ priv = NULL; } if (!list_empty(list) && merge) { struct fsnotify_event *tmp; tmp = merge(list, event); if (tmp) { mutex_unlock(&group->notification_mutex); if (return_event) fsnotify_put_event(return_event); if (holder != &event->holder) fsnotify_destroy_event_holder(holder); return tmp; } } spin_lock(&event->lock); if (list_empty(&event->holder.event_list)) { if (unlikely(holder)) fsnotify_destroy_event_holder(holder); holder = &event->holder; } else if (unlikely(!holder)) { /* between the time we checked above and got the lock the in * event holder was used, go back and get a new one */ spin_unlock(&event->lock); mutex_unlock(&group->notification_mutex); if (return_event) { fsnotify_put_event(return_event); return_event = NULL; } goto alloc_holder; } 
group->q_len++; holder->event = event; fsnotify_get_event(event); list_add_tail(&holder->event_list, list); if (priv) list_add_tail(&priv->event_list, &event->private_data_list); spin_unlock(&event->lock); mutex_unlock(&group->notification_mutex); wake_up(&group->notification_waitq); kill_fasync(&group->fsn_fa, SIGIO, POLL_IN); return return_event; } /* * Remove and return the first event from the notification list. There is a * reference held on this event since it was on the list. It is the responsibility * of the caller to drop this reference. */ struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group) { struct fsnotify_event *event; struct fsnotify_event_holder *holder; BUG_ON(!mutex_is_locked(&group->notification_mutex)); pr_debug("%s: group=%p\n", __func__, group); holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list); event = holder->event; spin_lock(&event->lock); holder->event = NULL; list_del_init(&holder->event_list); spin_unlock(&event->lock); /* event == holder means we are referenced through the in event holder */ if (holder != &event->holder) fsnotify_destroy_event_holder(holder); group->q_len--; return event; } /* * This will not remove the event, that must be done with fsnotify_remove_notify_event() */ struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group) { struct fsnotify_event *event; struct fsnotify_event_holder *holder; BUG_ON(!mutex_is_locked(&group->notification_mutex)); holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list); event = holder->event; return event; } /* * Called when a group is being torn down to clean up any outstanding * event notifications. 
*/ void fsnotify_flush_notify(struct fsnotify_group *group) { struct fsnotify_event *event; struct fsnotify_event_private_data *priv; mutex_lock(&group->notification_mutex); while (!fsnotify_notify_queue_is_empty(group)) { event = fsnotify_remove_notify_event(group); /* if they don't implement free_event_priv they better not have attached any */ if (group->ops->free_event_priv) { spin_lock(&event->lock); priv = fsnotify_remove_priv_from_event(group, event); spin_unlock(&event->lock); if (priv) group->ops->free_event_priv(priv); } fsnotify_put_event(event); /* matches fsnotify_add_notify_event */ } mutex_unlock(&group->notification_mutex); } static void initialize_event(struct fsnotify_event *event) { INIT_LIST_HEAD(&event->holder.event_list); atomic_set(&event->refcnt, 1); spin_lock_init(&event->lock); INIT_LIST_HEAD(&event->private_data_list); } /* * Caller damn well better be holding whatever mutex is protecting the * old_holder->event_list and the new_event must be a clean event which * cannot be found anywhere else in the kernel. */ int fsnotify_replace_event(struct fsnotify_event_holder *old_holder, struct fsnotify_event *new_event) { struct fsnotify_event *old_event = old_holder->event; struct fsnotify_event_holder *new_holder = &new_event->holder; enum event_spinlock_class { SPINLOCK_OLD, SPINLOCK_NEW, }; pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, new_event); /* * if the new_event's embedded holder is in use someone * screwed up and didn't give us a clean new event. 
*/ BUG_ON(!list_empty(&new_holder->event_list)); spin_lock_nested(&old_event->lock, SPINLOCK_OLD); spin_lock_nested(&new_event->lock, SPINLOCK_NEW); new_holder->event = new_event; list_replace_init(&old_holder->event_list, &new_holder->event_list); spin_unlock(&new_event->lock); spin_unlock(&old_event->lock); /* event == holder means we are referenced through the in event holder */ if (old_holder != &old_event->holder) fsnotify_destroy_event_holder(old_holder); fsnotify_get_event(new_event); /* on the list take reference */ fsnotify_put_event(old_event); /* off the list, drop reference */ return 0; } struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event) { struct fsnotify_event *event; event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL); if (!event) return NULL; pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, event); memcpy(event, old_event, sizeof(*event)); initialize_event(event); if (event->name_len) { event->file_name = kstrdup(old_event->file_name, GFP_KERNEL); if (!event->file_name) { kmem_cache_free(fsnotify_event_cachep, event); return NULL; } } event->tgid = get_pid(old_event->tgid); if (event->data_type == FSNOTIFY_EVENT_PATH) path_get(&event->path); return event; } /* * fsnotify_create_event - Allocate a new event which will be sent to each * group's handle_event function if the group was interested in this * particular event. * * @to_tell the inode which is supposed to receive the event (sometimes a * parent of the inode to which the event happened. * @mask what actually happened. * @data pointer to the object which was actually affected * @data_type flag indication if the data is a file, path, inode, nothing... 
* @name the filename, if available */ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data, int data_type, const unsigned char *name, u32 cookie, gfp_t gfp) { struct fsnotify_event *event; event = kmem_cache_zalloc(fsnotify_event_cachep, gfp); if (!event) return NULL; pr_debug("%s: event=%p to_tell=%p mask=%x data=%p data_type=%d\n", __func__, event, to_tell, mask, data, data_type); initialize_event(event); if (name) { event->file_name = kstrdup(name, gfp); if (!event->file_name) { kmem_cache_free(fsnotify_event_cachep, event); return NULL; } event->name_len = strlen(event->file_name); } event->tgid = get_pid(task_tgid(current)); event->sync_cookie = cookie; event->to_tell = to_tell; event->data_type = data_type; switch (data_type) { case FSNOTIFY_EVENT_PATH: { struct path *path = data; event->path.dentry = path->dentry; event->path.mnt = path->mnt; path_get(&event->path); break; } case FSNOTIFY_EVENT_INODE: event->inode = data; break; case FSNOTIFY_EVENT_NONE: event->inode = NULL; event->path.dentry = NULL; event->path.mnt = NULL; break; default: BUG(); } event->mask = mask; return event; } static __init int fsnotify_notification_init(void) { fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC); fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC); q_overflow_event = fsnotify_create_event(NULL, FS_Q_OVERFLOW, NULL, FSNOTIFY_EVENT_NONE, NULL, 0, GFP_KERNEL); if (!q_overflow_event) panic("unable to allocate fsnotify q_overflow_event\n"); return 0; } subsys_initcall(fsnotify_notification_init);
gpl-2.0
Twisted-Kernel/S6-MM
sound/soc/codecs/ak4671.c
2720
22250
/* * ak4671.c -- audio driver for AK4671 * * Copyright (C) 2009 Samsung Electronics Co.Ltd * Author: Joonyoung Shim <jy0922.shim@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/slab.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "ak4671.h" /* codec private data */ struct ak4671_priv { enum snd_soc_control_type control_type; }; /* ak4671 register cache & default register settings */ static const u8 ak4671_reg[AK4671_CACHEREGNUM] = { 0x00, /* AK4671_AD_DA_POWER_MANAGEMENT (0x00) */ 0xf6, /* AK4671_PLL_MODE_SELECT0 (0x01) */ 0x00, /* AK4671_PLL_MODE_SELECT1 (0x02) */ 0x02, /* AK4671_FORMAT_SELECT (0x03) */ 0x00, /* AK4671_MIC_SIGNAL_SELECT (0x04) */ 0x55, /* AK4671_MIC_AMP_GAIN (0x05) */ 0x00, /* AK4671_MIXING_POWER_MANAGEMENT0 (0x06) */ 0x00, /* AK4671_MIXING_POWER_MANAGEMENT1 (0x07) */ 0xb5, /* AK4671_OUTPUT_VOLUME_CONTROL (0x08) */ 0x00, /* AK4671_LOUT1_SIGNAL_SELECT (0x09) */ 0x00, /* AK4671_ROUT1_SIGNAL_SELECT (0x0a) */ 0x00, /* AK4671_LOUT2_SIGNAL_SELECT (0x0b) */ 0x00, /* AK4671_ROUT2_SIGNAL_SELECT (0x0c) */ 0x00, /* AK4671_LOUT3_SIGNAL_SELECT (0x0d) */ 0x00, /* AK4671_ROUT3_SIGNAL_SELECT (0x0e) */ 0x00, /* AK4671_LOUT1_POWER_MANAGERMENT (0x0f) */ 0x00, /* AK4671_LOUT2_POWER_MANAGERMENT (0x10) */ 0x80, /* AK4671_LOUT3_POWER_MANAGERMENT (0x11) */ 0x91, /* AK4671_LCH_INPUT_VOLUME_CONTROL (0x12) */ 0x91, /* AK4671_RCH_INPUT_VOLUME_CONTROL (0x13) */ 0xe1, /* AK4671_ALC_REFERENCE_SELECT (0x14) */ 0x00, /* AK4671_DIGITAL_MIXING_CONTROL (0x15) */ 0x00, /* AK4671_ALC_TIMER_SELECT (0x16) */ 0x00, /* AK4671_ALC_MODE_CONTROL (0x17) */ 0x02, /* AK4671_MODE_CONTROL1 (0x18) */ 0x01, /* AK4671_MODE_CONTROL2 
(0x19) */ 0x18, /* AK4671_LCH_OUTPUT_VOLUME_CONTROL (0x1a) */ 0x18, /* AK4671_RCH_OUTPUT_VOLUME_CONTROL (0x1b) */ 0x00, /* AK4671_SIDETONE_A_CONTROL (0x1c) */ 0x02, /* AK4671_DIGITAL_FILTER_SELECT (0x1d) */ 0x00, /* AK4671_FIL3_COEFFICIENT0 (0x1e) */ 0x00, /* AK4671_FIL3_COEFFICIENT1 (0x1f) */ 0x00, /* AK4671_FIL3_COEFFICIENT2 (0x20) */ 0x00, /* AK4671_FIL3_COEFFICIENT3 (0x21) */ 0x00, /* AK4671_EQ_COEFFICIENT0 (0x22) */ 0x00, /* AK4671_EQ_COEFFICIENT1 (0x23) */ 0x00, /* AK4671_EQ_COEFFICIENT2 (0x24) */ 0x00, /* AK4671_EQ_COEFFICIENT3 (0x25) */ 0x00, /* AK4671_EQ_COEFFICIENT4 (0x26) */ 0x00, /* AK4671_EQ_COEFFICIENT5 (0x27) */ 0xa9, /* AK4671_FIL1_COEFFICIENT0 (0x28) */ 0x1f, /* AK4671_FIL1_COEFFICIENT1 (0x29) */ 0xad, /* AK4671_FIL1_COEFFICIENT2 (0x2a) */ 0x20, /* AK4671_FIL1_COEFFICIENT3 (0x2b) */ 0x00, /* AK4671_FIL2_COEFFICIENT0 (0x2c) */ 0x00, /* AK4671_FIL2_COEFFICIENT1 (0x2d) */ 0x00, /* AK4671_FIL2_COEFFICIENT2 (0x2e) */ 0x00, /* AK4671_FIL2_COEFFICIENT3 (0x2f) */ 0x00, /* AK4671_DIGITAL_FILTER_SELECT2 (0x30) */ 0x00, /* this register not used */ 0x00, /* AK4671_E1_COEFFICIENT0 (0x32) */ 0x00, /* AK4671_E1_COEFFICIENT1 (0x33) */ 0x00, /* AK4671_E1_COEFFICIENT2 (0x34) */ 0x00, /* AK4671_E1_COEFFICIENT3 (0x35) */ 0x00, /* AK4671_E1_COEFFICIENT4 (0x36) */ 0x00, /* AK4671_E1_COEFFICIENT5 (0x37) */ 0x00, /* AK4671_E2_COEFFICIENT0 (0x38) */ 0x00, /* AK4671_E2_COEFFICIENT1 (0x39) */ 0x00, /* AK4671_E2_COEFFICIENT2 (0x3a) */ 0x00, /* AK4671_E2_COEFFICIENT3 (0x3b) */ 0x00, /* AK4671_E2_COEFFICIENT4 (0x3c) */ 0x00, /* AK4671_E2_COEFFICIENT5 (0x3d) */ 0x00, /* AK4671_E3_COEFFICIENT0 (0x3e) */ 0x00, /* AK4671_E3_COEFFICIENT1 (0x3f) */ 0x00, /* AK4671_E3_COEFFICIENT2 (0x40) */ 0x00, /* AK4671_E3_COEFFICIENT3 (0x41) */ 0x00, /* AK4671_E3_COEFFICIENT4 (0x42) */ 0x00, /* AK4671_E3_COEFFICIENT5 (0x43) */ 0x00, /* AK4671_E4_COEFFICIENT0 (0x44) */ 0x00, /* AK4671_E4_COEFFICIENT1 (0x45) */ 0x00, /* AK4671_E4_COEFFICIENT2 (0x46) */ 0x00, /* AK4671_E4_COEFFICIENT3 (0x47) */ 
0x00, /* AK4671_E4_COEFFICIENT4 (0x48) */ 0x00, /* AK4671_E4_COEFFICIENT5 (0x49) */ 0x00, /* AK4671_E5_COEFFICIENT0 (0x4a) */ 0x00, /* AK4671_E5_COEFFICIENT1 (0x4b) */ 0x00, /* AK4671_E5_COEFFICIENT2 (0x4c) */ 0x00, /* AK4671_E5_COEFFICIENT3 (0x4d) */ 0x00, /* AK4671_E5_COEFFICIENT4 (0x4e) */ 0x00, /* AK4671_E5_COEFFICIENT5 (0x4f) */ 0x88, /* AK4671_EQ_CONTROL_250HZ_100HZ (0x50) */ 0x88, /* AK4671_EQ_CONTROL_3500HZ_1KHZ (0x51) */ 0x08, /* AK4671_EQ_CONTRO_10KHZ (0x52) */ 0x00, /* AK4671_PCM_IF_CONTROL0 (0x53) */ 0x00, /* AK4671_PCM_IF_CONTROL1 (0x54) */ 0x00, /* AK4671_PCM_IF_CONTROL2 (0x55) */ 0x18, /* AK4671_DIGITAL_VOLUME_B_CONTROL (0x56) */ 0x18, /* AK4671_DIGITAL_VOLUME_C_CONTROL (0x57) */ 0x00, /* AK4671_SIDETONE_VOLUME_CONTROL (0x58) */ 0x00, /* AK4671_DIGITAL_MIXING_CONTROL2 (0x59) */ 0x00, /* AK4671_SAR_ADC_CONTROL (0x5a) */ }; /* * LOUT1/ROUT1 output volume control: * from -24 to 6 dB in 6 dB steps (mute instead of -30 dB) */ static DECLARE_TLV_DB_SCALE(out1_tlv, -3000, 600, 1); /* * LOUT2/ROUT2 output volume control: * from -33 to 6 dB in 3 dB steps (mute instead of -33 dB) */ static DECLARE_TLV_DB_SCALE(out2_tlv, -3300, 300, 1); /* * LOUT3/ROUT3 output volume control: * from -6 to 3 dB in 3 dB steps */ static DECLARE_TLV_DB_SCALE(out3_tlv, -600, 300, 0); /* * Mic amp gain control: * from -15 to 30 dB in 3 dB steps * REVISIT: The actual min value(0x01) is -12 dB and the reg value 0x00 is not * available */ static DECLARE_TLV_DB_SCALE(mic_amp_tlv, -1500, 300, 0); static const struct snd_kcontrol_new ak4671_snd_controls[] = { /* Common playback gain controls */ SOC_SINGLE_TLV("Line Output1 Playback Volume", AK4671_OUTPUT_VOLUME_CONTROL, 0, 0x6, 0, out1_tlv), SOC_SINGLE_TLV("Headphone Output2 Playback Volume", AK4671_OUTPUT_VOLUME_CONTROL, 4, 0xd, 0, out2_tlv), SOC_SINGLE_TLV("Line Output3 Playback Volume", AK4671_LOUT3_POWER_MANAGERMENT, 6, 0x3, 0, out3_tlv), /* Common capture gain controls */ SOC_DOUBLE_TLV("Mic Amp Capture Volume", AK4671_MIC_AMP_GAIN, 
0, 4, 0xf, 0, mic_amp_tlv), }; /* event handlers */ static int ak4671_out2_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; switch (event) { case SND_SOC_DAPM_POST_PMU: snd_soc_update_bits(codec, AK4671_LOUT2_POWER_MANAGERMENT, AK4671_MUTEN, AK4671_MUTEN); break; case SND_SOC_DAPM_PRE_PMD: snd_soc_update_bits(codec, AK4671_LOUT2_POWER_MANAGERMENT, AK4671_MUTEN, 0); break; } return 0; } /* Output Mixers */ static const struct snd_kcontrol_new ak4671_lout1_mixer_controls[] = { SOC_DAPM_SINGLE("DACL", AK4671_LOUT1_SIGNAL_SELECT, 0, 1, 0), SOC_DAPM_SINGLE("LINL1", AK4671_LOUT1_SIGNAL_SELECT, 1, 1, 0), SOC_DAPM_SINGLE("LINL2", AK4671_LOUT1_SIGNAL_SELECT, 2, 1, 0), SOC_DAPM_SINGLE("LINL3", AK4671_LOUT1_SIGNAL_SELECT, 3, 1, 0), SOC_DAPM_SINGLE("LINL4", AK4671_LOUT1_SIGNAL_SELECT, 4, 1, 0), SOC_DAPM_SINGLE("LOOPL", AK4671_LOUT1_SIGNAL_SELECT, 5, 1, 0), }; static const struct snd_kcontrol_new ak4671_rout1_mixer_controls[] = { SOC_DAPM_SINGLE("DACR", AK4671_ROUT1_SIGNAL_SELECT, 0, 1, 0), SOC_DAPM_SINGLE("RINR1", AK4671_ROUT1_SIGNAL_SELECT, 1, 1, 0), SOC_DAPM_SINGLE("RINR2", AK4671_ROUT1_SIGNAL_SELECT, 2, 1, 0), SOC_DAPM_SINGLE("RINR3", AK4671_ROUT1_SIGNAL_SELECT, 3, 1, 0), SOC_DAPM_SINGLE("RINR4", AK4671_ROUT1_SIGNAL_SELECT, 4, 1, 0), SOC_DAPM_SINGLE("LOOPR", AK4671_ROUT1_SIGNAL_SELECT, 5, 1, 0), }; static const struct snd_kcontrol_new ak4671_lout2_mixer_controls[] = { SOC_DAPM_SINGLE("DACHL", AK4671_LOUT2_SIGNAL_SELECT, 0, 1, 0), SOC_DAPM_SINGLE("LINH1", AK4671_LOUT2_SIGNAL_SELECT, 1, 1, 0), SOC_DAPM_SINGLE("LINH2", AK4671_LOUT2_SIGNAL_SELECT, 2, 1, 0), SOC_DAPM_SINGLE("LINH3", AK4671_LOUT2_SIGNAL_SELECT, 3, 1, 0), SOC_DAPM_SINGLE("LINH4", AK4671_LOUT2_SIGNAL_SELECT, 4, 1, 0), SOC_DAPM_SINGLE("LOOPHL", AK4671_LOUT2_SIGNAL_SELECT, 5, 1, 0), }; static const struct snd_kcontrol_new ak4671_rout2_mixer_controls[] = { SOC_DAPM_SINGLE("DACHR", AK4671_ROUT2_SIGNAL_SELECT, 0, 1, 0), SOC_DAPM_SINGLE("RINH1", 
AK4671_ROUT2_SIGNAL_SELECT, 1, 1, 0), SOC_DAPM_SINGLE("RINH2", AK4671_ROUT2_SIGNAL_SELECT, 2, 1, 0), SOC_DAPM_SINGLE("RINH3", AK4671_ROUT2_SIGNAL_SELECT, 3, 1, 0), SOC_DAPM_SINGLE("RINH4", AK4671_ROUT2_SIGNAL_SELECT, 4, 1, 0), SOC_DAPM_SINGLE("LOOPHR", AK4671_ROUT2_SIGNAL_SELECT, 5, 1, 0), }; static const struct snd_kcontrol_new ak4671_lout3_mixer_controls[] = { SOC_DAPM_SINGLE("DACSL", AK4671_LOUT3_SIGNAL_SELECT, 0, 1, 0), SOC_DAPM_SINGLE("LINS1", AK4671_LOUT3_SIGNAL_SELECT, 1, 1, 0), SOC_DAPM_SINGLE("LINS2", AK4671_LOUT3_SIGNAL_SELECT, 2, 1, 0), SOC_DAPM_SINGLE("LINS3", AK4671_LOUT3_SIGNAL_SELECT, 3, 1, 0), SOC_DAPM_SINGLE("LINS4", AK4671_LOUT3_SIGNAL_SELECT, 4, 1, 0), SOC_DAPM_SINGLE("LOOPSL", AK4671_LOUT3_SIGNAL_SELECT, 5, 1, 0), }; static const struct snd_kcontrol_new ak4671_rout3_mixer_controls[] = { SOC_DAPM_SINGLE("DACSR", AK4671_ROUT3_SIGNAL_SELECT, 0, 1, 0), SOC_DAPM_SINGLE("RINS1", AK4671_ROUT3_SIGNAL_SELECT, 1, 1, 0), SOC_DAPM_SINGLE("RINS2", AK4671_ROUT3_SIGNAL_SELECT, 2, 1, 0), SOC_DAPM_SINGLE("RINS3", AK4671_ROUT3_SIGNAL_SELECT, 3, 1, 0), SOC_DAPM_SINGLE("RINS4", AK4671_ROUT3_SIGNAL_SELECT, 4, 1, 0), SOC_DAPM_SINGLE("LOOPSR", AK4671_ROUT3_SIGNAL_SELECT, 5, 1, 0), }; /* Input MUXs */ static const char *ak4671_lin_mux_texts[] = {"LIN1", "LIN2", "LIN3", "LIN4"}; static const struct soc_enum ak4671_lin_mux_enum = SOC_ENUM_SINGLE(AK4671_MIC_SIGNAL_SELECT, 0, ARRAY_SIZE(ak4671_lin_mux_texts), ak4671_lin_mux_texts); static const struct snd_kcontrol_new ak4671_lin_mux_control = SOC_DAPM_ENUM("Route", ak4671_lin_mux_enum); static const char *ak4671_rin_mux_texts[] = {"RIN1", "RIN2", "RIN3", "RIN4"}; static const struct soc_enum ak4671_rin_mux_enum = SOC_ENUM_SINGLE(AK4671_MIC_SIGNAL_SELECT, 2, ARRAY_SIZE(ak4671_rin_mux_texts), ak4671_rin_mux_texts); static const struct snd_kcontrol_new ak4671_rin_mux_control = SOC_DAPM_ENUM("Route", ak4671_rin_mux_enum); static const struct snd_soc_dapm_widget ak4671_dapm_widgets[] = { /* Inputs */ SND_SOC_DAPM_INPUT("LIN1"), 
SND_SOC_DAPM_INPUT("RIN1"), SND_SOC_DAPM_INPUT("LIN2"), SND_SOC_DAPM_INPUT("RIN2"), SND_SOC_DAPM_INPUT("LIN3"), SND_SOC_DAPM_INPUT("RIN3"), SND_SOC_DAPM_INPUT("LIN4"), SND_SOC_DAPM_INPUT("RIN4"), /* Outputs */ SND_SOC_DAPM_OUTPUT("LOUT1"), SND_SOC_DAPM_OUTPUT("ROUT1"), SND_SOC_DAPM_OUTPUT("LOUT2"), SND_SOC_DAPM_OUTPUT("ROUT2"), SND_SOC_DAPM_OUTPUT("LOUT3"), SND_SOC_DAPM_OUTPUT("ROUT3"), /* DAC */ SND_SOC_DAPM_DAC("DAC Left", "Left HiFi Playback", AK4671_AD_DA_POWER_MANAGEMENT, 6, 0), SND_SOC_DAPM_DAC("DAC Right", "Right HiFi Playback", AK4671_AD_DA_POWER_MANAGEMENT, 7, 0), /* ADC */ SND_SOC_DAPM_ADC("ADC Left", "Left HiFi Capture", AK4671_AD_DA_POWER_MANAGEMENT, 4, 0), SND_SOC_DAPM_ADC("ADC Right", "Right HiFi Capture", AK4671_AD_DA_POWER_MANAGEMENT, 5, 0), /* PGA */ SND_SOC_DAPM_PGA("LOUT2 Mix Amp", AK4671_LOUT2_POWER_MANAGERMENT, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("ROUT2 Mix Amp", AK4671_LOUT2_POWER_MANAGERMENT, 6, 0, NULL, 0), SND_SOC_DAPM_PGA("LIN1 Mixing Circuit", AK4671_MIXING_POWER_MANAGEMENT1, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("RIN1 Mixing Circuit", AK4671_MIXING_POWER_MANAGEMENT1, 1, 0, NULL, 0), SND_SOC_DAPM_PGA("LIN2 Mixing Circuit", AK4671_MIXING_POWER_MANAGEMENT1, 2, 0, NULL, 0), SND_SOC_DAPM_PGA("RIN2 Mixing Circuit", AK4671_MIXING_POWER_MANAGEMENT1, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("LIN3 Mixing Circuit", AK4671_MIXING_POWER_MANAGEMENT1, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("RIN3 Mixing Circuit", AK4671_MIXING_POWER_MANAGEMENT1, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("LIN4 Mixing Circuit", AK4671_MIXING_POWER_MANAGEMENT1, 6, 0, NULL, 0), SND_SOC_DAPM_PGA("RIN4 Mixing Circuit", AK4671_MIXING_POWER_MANAGEMENT1, 7, 0, NULL, 0), /* Output Mixers */ SND_SOC_DAPM_MIXER("LOUT1 Mixer", AK4671_LOUT1_POWER_MANAGERMENT, 0, 0, &ak4671_lout1_mixer_controls[0], ARRAY_SIZE(ak4671_lout1_mixer_controls)), SND_SOC_DAPM_MIXER("ROUT1 Mixer", AK4671_LOUT1_POWER_MANAGERMENT, 1, 0, &ak4671_rout1_mixer_controls[0], ARRAY_SIZE(ak4671_rout1_mixer_controls)), SND_SOC_DAPM_MIXER_E("LOUT2 
Mixer", AK4671_LOUT2_POWER_MANAGERMENT, 0, 0, &ak4671_lout2_mixer_controls[0], ARRAY_SIZE(ak4671_lout2_mixer_controls), ak4671_out2_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_MIXER_E("ROUT2 Mixer", AK4671_LOUT2_POWER_MANAGERMENT, 1, 0, &ak4671_rout2_mixer_controls[0], ARRAY_SIZE(ak4671_rout2_mixer_controls), ak4671_out2_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_MIXER("LOUT3 Mixer", AK4671_LOUT3_POWER_MANAGERMENT, 0, 0, &ak4671_lout3_mixer_controls[0], ARRAY_SIZE(ak4671_lout3_mixer_controls)), SND_SOC_DAPM_MIXER("ROUT3 Mixer", AK4671_LOUT3_POWER_MANAGERMENT, 1, 0, &ak4671_rout3_mixer_controls[0], ARRAY_SIZE(ak4671_rout3_mixer_controls)), /* Input MUXs */ SND_SOC_DAPM_MUX("LIN MUX", AK4671_AD_DA_POWER_MANAGEMENT, 2, 0, &ak4671_lin_mux_control), SND_SOC_DAPM_MUX("RIN MUX", AK4671_AD_DA_POWER_MANAGEMENT, 3, 0, &ak4671_rin_mux_control), /* Mic Power */ SND_SOC_DAPM_MICBIAS("Mic Bias", AK4671_AD_DA_POWER_MANAGEMENT, 1, 0), /* Supply */ SND_SOC_DAPM_SUPPLY("PMPLL", AK4671_PLL_MODE_SELECT1, 0, 0, NULL, 0), }; static const struct snd_soc_dapm_route ak4671_intercon[] = { {"DAC Left", "NULL", "PMPLL"}, {"DAC Right", "NULL", "PMPLL"}, {"ADC Left", "NULL", "PMPLL"}, {"ADC Right", "NULL", "PMPLL"}, /* Outputs */ {"LOUT1", "NULL", "LOUT1 Mixer"}, {"ROUT1", "NULL", "ROUT1 Mixer"}, {"LOUT2", "NULL", "LOUT2 Mix Amp"}, {"ROUT2", "NULL", "ROUT2 Mix Amp"}, {"LOUT3", "NULL", "LOUT3 Mixer"}, {"ROUT3", "NULL", "ROUT3 Mixer"}, {"LOUT1 Mixer", "DACL", "DAC Left"}, {"ROUT1 Mixer", "DACR", "DAC Right"}, {"LOUT2 Mixer", "DACHL", "DAC Left"}, {"ROUT2 Mixer", "DACHR", "DAC Right"}, {"LOUT2 Mix Amp", "NULL", "LOUT2 Mixer"}, {"ROUT2 Mix Amp", "NULL", "ROUT2 Mixer"}, {"LOUT3 Mixer", "DACSL", "DAC Left"}, {"ROUT3 Mixer", "DACSR", "DAC Right"}, /* Inputs */ {"LIN MUX", "LIN1", "LIN1"}, {"LIN MUX", "LIN2", "LIN2"}, {"LIN MUX", "LIN3", "LIN3"}, {"LIN MUX", "LIN4", "LIN4"}, {"RIN MUX", "RIN1", "RIN1"}, {"RIN MUX", "RIN2", "RIN2"}, {"RIN MUX", "RIN3", "RIN3"}, 
{"RIN MUX", "RIN4", "RIN4"}, {"LIN1", NULL, "Mic Bias"}, {"RIN1", NULL, "Mic Bias"}, {"LIN2", NULL, "Mic Bias"}, {"RIN2", NULL, "Mic Bias"}, {"ADC Left", "NULL", "LIN MUX"}, {"ADC Right", "NULL", "RIN MUX"}, /* Analog Loops */ {"LIN1 Mixing Circuit", "NULL", "LIN1"}, {"RIN1 Mixing Circuit", "NULL", "RIN1"}, {"LIN2 Mixing Circuit", "NULL", "LIN2"}, {"RIN2 Mixing Circuit", "NULL", "RIN2"}, {"LIN3 Mixing Circuit", "NULL", "LIN3"}, {"RIN3 Mixing Circuit", "NULL", "RIN3"}, {"LIN4 Mixing Circuit", "NULL", "LIN4"}, {"RIN4 Mixing Circuit", "NULL", "RIN4"}, {"LOUT1 Mixer", "LINL1", "LIN1 Mixing Circuit"}, {"ROUT1 Mixer", "RINR1", "RIN1 Mixing Circuit"}, {"LOUT2 Mixer", "LINH1", "LIN1 Mixing Circuit"}, {"ROUT2 Mixer", "RINH1", "RIN1 Mixing Circuit"}, {"LOUT3 Mixer", "LINS1", "LIN1 Mixing Circuit"}, {"ROUT3 Mixer", "RINS1", "RIN1 Mixing Circuit"}, {"LOUT1 Mixer", "LINL2", "LIN2 Mixing Circuit"}, {"ROUT1 Mixer", "RINR2", "RIN2 Mixing Circuit"}, {"LOUT2 Mixer", "LINH2", "LIN2 Mixing Circuit"}, {"ROUT2 Mixer", "RINH2", "RIN2 Mixing Circuit"}, {"LOUT3 Mixer", "LINS2", "LIN2 Mixing Circuit"}, {"ROUT3 Mixer", "RINS2", "RIN2 Mixing Circuit"}, {"LOUT1 Mixer", "LINL3", "LIN3 Mixing Circuit"}, {"ROUT1 Mixer", "RINR3", "RIN3 Mixing Circuit"}, {"LOUT2 Mixer", "LINH3", "LIN3 Mixing Circuit"}, {"ROUT2 Mixer", "RINH3", "RIN3 Mixing Circuit"}, {"LOUT3 Mixer", "LINS3", "LIN3 Mixing Circuit"}, {"ROUT3 Mixer", "RINS3", "RIN3 Mixing Circuit"}, {"LOUT1 Mixer", "LINL4", "LIN4 Mixing Circuit"}, {"ROUT1 Mixer", "RINR4", "RIN4 Mixing Circuit"}, {"LOUT2 Mixer", "LINH4", "LIN4 Mixing Circuit"}, {"ROUT2 Mixer", "RINH4", "RIN4 Mixing Circuit"}, {"LOUT3 Mixer", "LINS4", "LIN4 Mixing Circuit"}, {"ROUT3 Mixer", "RINS4", "RIN4 Mixing Circuit"}, }; static int ak4671_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; u8 fs; fs = snd_soc_read(codec, AK4671_PLL_MODE_SELECT0); fs &= ~AK4671_FS; switch 
(params_rate(params)) { case 8000: fs |= AK4671_FS_8KHZ; break; case 12000: fs |= AK4671_FS_12KHZ; break; case 16000: fs |= AK4671_FS_16KHZ; break; case 24000: fs |= AK4671_FS_24KHZ; break; case 11025: fs |= AK4671_FS_11_025KHZ; break; case 22050: fs |= AK4671_FS_22_05KHZ; break; case 32000: fs |= AK4671_FS_32KHZ; break; case 44100: fs |= AK4671_FS_44_1KHZ; break; case 48000: fs |= AK4671_FS_48KHZ; break; default: return -EINVAL; } snd_soc_write(codec, AK4671_PLL_MODE_SELECT0, fs); return 0; } static int ak4671_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = dai->codec; u8 pll; pll = snd_soc_read(codec, AK4671_PLL_MODE_SELECT0); pll &= ~AK4671_PLL; switch (freq) { case 11289600: pll |= AK4671_PLL_11_2896MHZ; break; case 12000000: pll |= AK4671_PLL_12MHZ; break; case 12288000: pll |= AK4671_PLL_12_288MHZ; break; case 13000000: pll |= AK4671_PLL_13MHZ; break; case 13500000: pll |= AK4671_PLL_13_5MHZ; break; case 19200000: pll |= AK4671_PLL_19_2MHZ; break; case 24000000: pll |= AK4671_PLL_24MHZ; break; case 26000000: pll |= AK4671_PLL_26MHZ; break; case 27000000: pll |= AK4671_PLL_27MHZ; break; default: return -EINVAL; } snd_soc_write(codec, AK4671_PLL_MODE_SELECT0, pll); return 0; } static int ak4671_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_codec *codec = dai->codec; u8 mode; u8 format; /* set master/slave audio interface */ mode = snd_soc_read(codec, AK4671_PLL_MODE_SELECT1); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: mode |= AK4671_M_S; break; case SND_SOC_DAIFMT_CBM_CFS: mode &= ~(AK4671_M_S); break; default: return -EINVAL; } /* interface format */ format = snd_soc_read(codec, AK4671_FORMAT_SELECT); format &= ~AK4671_DIF; switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: format |= AK4671_DIF_I2S_MODE; break; case SND_SOC_DAIFMT_LEFT_J: format |= AK4671_DIF_MSB_MODE; break; case SND_SOC_DAIFMT_DSP_A: format |= 
AK4671_DIF_DSP_MODE; format |= AK4671_BCKP; format |= AK4671_MSBS; break; default: return -EINVAL; } /* set mode and format */ snd_soc_write(codec, AK4671_PLL_MODE_SELECT1, mode); snd_soc_write(codec, AK4671_FORMAT_SELECT, format); return 0; } static int ak4671_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { switch (level) { case SND_SOC_BIAS_ON: case SND_SOC_BIAS_PREPARE: case SND_SOC_BIAS_STANDBY: snd_soc_update_bits(codec, AK4671_AD_DA_POWER_MANAGEMENT, AK4671_PMVCM, AK4671_PMVCM); break; case SND_SOC_BIAS_OFF: snd_soc_write(codec, AK4671_AD_DA_POWER_MANAGEMENT, 0x00); break; } codec->dapm.bias_level = level; return 0; } #define AK4671_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\ SNDRV_PCM_RATE_48000) #define AK4671_FORMATS SNDRV_PCM_FMTBIT_S16_LE static const struct snd_soc_dai_ops ak4671_dai_ops = { .hw_params = ak4671_hw_params, .set_sysclk = ak4671_set_dai_sysclk, .set_fmt = ak4671_set_dai_fmt, }; static struct snd_soc_dai_driver ak4671_dai = { .name = "ak4671-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = AK4671_RATES, .formats = AK4671_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = AK4671_RATES, .formats = AK4671_FORMATS,}, .ops = &ak4671_dai_ops, }; static int ak4671_probe(struct snd_soc_codec *codec) { struct ak4671_priv *ak4671 = snd_soc_codec_get_drvdata(codec); int ret; ret = snd_soc_codec_set_cache_io(codec, 8, 8, ak4671->control_type); if (ret < 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } snd_soc_add_codec_controls(codec, ak4671_snd_controls, ARRAY_SIZE(ak4671_snd_controls)); ak4671_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return ret; } static int ak4671_remove(struct snd_soc_codec *codec) { ak4671_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver 
soc_codec_dev_ak4671 = { .probe = ak4671_probe, .remove = ak4671_remove, .set_bias_level = ak4671_set_bias_level, .reg_cache_size = AK4671_CACHEREGNUM, .reg_word_size = sizeof(u8), .reg_cache_default = ak4671_reg, .dapm_widgets = ak4671_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(ak4671_dapm_widgets), .dapm_routes = ak4671_intercon, .num_dapm_routes = ARRAY_SIZE(ak4671_intercon), }; static int ak4671_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ak4671_priv *ak4671; int ret; ak4671 = devm_kzalloc(&client->dev, sizeof(struct ak4671_priv), GFP_KERNEL); if (ak4671 == NULL) return -ENOMEM; i2c_set_clientdata(client, ak4671); ak4671->control_type = SND_SOC_I2C; ret = snd_soc_register_codec(&client->dev, &soc_codec_dev_ak4671, &ak4671_dai, 1); return ret; } static int ak4671_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id ak4671_i2c_id[] = { { "ak4671", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ak4671_i2c_id); static struct i2c_driver ak4671_i2c_driver = { .driver = { .name = "ak4671-codec", .owner = THIS_MODULE, }, .probe = ak4671_i2c_probe, .remove = ak4671_i2c_remove, .id_table = ak4671_i2c_id, }; module_i2c_driver(ak4671_i2c_driver); MODULE_DESCRIPTION("ASoC AK4671 codec driver"); MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); MODULE_LICENSE("GPL");
gpl-2.0
CM7lu3000/meteor
arch/sh/kernel/cpu/sh3/clock-sh7712.c
4000
1601
/* * arch/sh/kernel/cpu/sh3/clock-sh7712.c * * SH7712 support for the clock framework * * Copyright (C) 2007 Andrew Murray <amurray@mpc-data.co.uk> * * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c * Copyright (C) 2005 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/clock.h> #include <asm/freq.h> #include <asm/io.h> static int multipliers[] = { 1, 2, 3 }; static int divisors[] = { 1, 2, 3, 4, 6 }; static void master_clk_init(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = (frqcr & 0x0300) >> 8; clk->rate *= multipliers[idx]; } static struct clk_ops sh7712_master_clk_ops = { .init = master_clk_init, }; static unsigned long module_clk_recalc(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = frqcr & 0x0007; return clk->parent->rate / divisors[idx]; } static struct clk_ops sh7712_module_clk_ops = { .recalc = module_clk_recalc, }; static unsigned long cpu_clk_recalc(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = (frqcr & 0x0030) >> 4; return clk->parent->rate / divisors[idx]; } static struct clk_ops sh7712_cpu_clk_ops = { .recalc = cpu_clk_recalc, }; static struct clk_ops *sh7712_clk_ops[] = { &sh7712_master_clk_ops, &sh7712_module_clk_ops, &sh7712_cpu_clk_ops, }; void __init arch_init_clk_ops(struct clk_ops **ops, int idx) { if (idx < ARRAY_SIZE(sh7712_clk_ops)) *ops = sh7712_clk_ops[idx]; }
gpl-2.0
twinsen17/android_kernel_samsung_golden
arch/cris/arch-v32/kernel/fasttimer.c
4768
22909
/* * linux/arch/cris/kernel/fasttimer.c * * Fast timers for ETRAX FS * * Copyright (C) 2000-2006 Axis Communications AB, Lund, Sweden */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/time.h> #include <linux/delay.h> #include <asm/irq.h> #include <asm/system.h> #include <hwregs/reg_map.h> #include <hwregs/reg_rdwr.h> #include <hwregs/timer_defs.h> #include <asm/fasttimer.h> #include <linux/proc_fs.h> /* * timer0 is running at 100MHz and generating jiffies timer ticks * at 100 or 1000 HZ. * fasttimer gives an API that gives timers that expire "between" the jiffies * giving microsecond resolution (10 ns). * fasttimer uses reg_timer_rw_trig register to get interrupt when * r_time reaches a certain value. */ #define DEBUG_LOG_INCLUDED #define FAST_TIMER_LOG /* #define FAST_TIMER_TEST */ #define FAST_TIMER_SANITY_CHECKS #ifdef FAST_TIMER_SANITY_CHECKS static int sanity_failed; #endif #define D1(x) #define D2(x) #define DP(x) static unsigned int fast_timer_running; static unsigned int fast_timers_added; static unsigned int fast_timers_started; static unsigned int fast_timers_expired; static unsigned int fast_timers_deleted; static unsigned int fast_timer_is_init; static unsigned int fast_timer_ints; struct fast_timer *fast_timer_list = NULL; #ifdef DEBUG_LOG_INCLUDED #define DEBUG_LOG_MAX 128 static const char * debug_log_string[DEBUG_LOG_MAX]; static unsigned long debug_log_value[DEBUG_LOG_MAX]; static unsigned int debug_log_cnt; static unsigned int debug_log_cnt_wrapped; #define DEBUG_LOG(string, value) \ { \ unsigned long log_flags; \ local_irq_save(log_flags); \ debug_log_string[debug_log_cnt] = (string); \ debug_log_value[debug_log_cnt] = (unsigned long)(value); \ if (++debug_log_cnt >= DEBUG_LOG_MAX) \ { \ debug_log_cnt = debug_log_cnt % DEBUG_LOG_MAX; \ debug_log_cnt_wrapped = 1; \ } \ local_irq_restore(log_flags); 
\ } #else #define DEBUG_LOG(string, value) #endif #define NUM_TIMER_STATS 16 #ifdef FAST_TIMER_LOG struct fast_timer timer_added_log[NUM_TIMER_STATS]; struct fast_timer timer_started_log[NUM_TIMER_STATS]; struct fast_timer timer_expired_log[NUM_TIMER_STATS]; #endif int timer_div_settings[NUM_TIMER_STATS]; int timer_delay_settings[NUM_TIMER_STATS]; struct work_struct fast_work; static void timer_trig_handler(struct work_struct *work); /* Not true gettimeofday, only checks the jiffies (uptime) + useconds */ inline void do_gettimeofday_fast(struct fasttime_t *tv) { tv->tv_jiff = jiffies; tv->tv_usec = GET_JIFFIES_USEC(); } inline int fasttime_cmp(struct fasttime_t *t0, struct fasttime_t *t1) { /* Compare jiffies. Takes care of wrapping */ if (time_before(t0->tv_jiff, t1->tv_jiff)) return -1; else if (time_after(t0->tv_jiff, t1->tv_jiff)) return 1; /* Compare us */ if (t0->tv_usec < t1->tv_usec) return -1; else if (t0->tv_usec > t1->tv_usec) return 1; return 0; } /* Called with ints off */ inline void start_timer_trig(unsigned long delay_us) { reg_timer_rw_ack_intr ack_intr = { 0 }; reg_timer_rw_intr_mask intr_mask; reg_timer_rw_trig trig; reg_timer_rw_trig_cfg trig_cfg = { 0 }; reg_timer_r_time r_time0; reg_timer_r_time r_time1; unsigned char trig_wrap; unsigned char time_wrap; r_time0 = REG_RD(timer, regi_timer0, r_time); D1(printk("start_timer_trig : %d us freq: %i div: %i\n", delay_us, freq_index, div)); /* Clear trig irq */ intr_mask = REG_RD(timer, regi_timer0, rw_intr_mask); intr_mask.trig = 0; REG_WR(timer, regi_timer0, rw_intr_mask, intr_mask); /* Set timer values and check if trigger wraps. 
*/ /* r_time is 100MHz (10 ns resolution) */ trig_wrap = (trig = r_time0 + delay_us*(1000/10)) < r_time0; timer_div_settings[fast_timers_started % NUM_TIMER_STATS] = trig; timer_delay_settings[fast_timers_started % NUM_TIMER_STATS] = delay_us; /* Ack interrupt */ ack_intr.trig = 1; REG_WR(timer, regi_timer0, rw_ack_intr, ack_intr); /* Start timer */ REG_WR(timer, regi_timer0, rw_trig, trig); trig_cfg.tmr = regk_timer_time; REG_WR(timer, regi_timer0, rw_trig_cfg, trig_cfg); /* Check if we have already passed the trig time */ r_time1 = REG_RD(timer, regi_timer0, r_time); time_wrap = r_time1 < r_time0; if ((trig_wrap && !time_wrap) || (r_time1 < trig)) { /* No, Enable trig irq */ intr_mask = REG_RD(timer, regi_timer0, rw_intr_mask); intr_mask.trig = 1; REG_WR(timer, regi_timer0, rw_intr_mask, intr_mask); fast_timers_started++; fast_timer_running = 1; } else { /* We have passed the time, disable trig point, ack intr */ trig_cfg.tmr = regk_timer_off; REG_WR(timer, regi_timer0, rw_trig_cfg, trig_cfg); REG_WR(timer, regi_timer0, rw_ack_intr, ack_intr); /* call the int routine */ INIT_WORK(&fast_work, timer_trig_handler); schedule_work(&fast_work); } } /* In version 1.4 this function takes 27 - 50 us */ void start_one_shot_timer(struct fast_timer *t, fast_timer_function_type *function, unsigned long data, unsigned long delay_us, const char *name) { unsigned long flags; struct fast_timer *tmp; D1(printk("sft %s %d us\n", name, delay_us)); local_irq_save(flags); do_gettimeofday_fast(&t->tv_set); tmp = fast_timer_list; #ifdef FAST_TIMER_SANITY_CHECKS /* Check so this is not in the list already... 
*/ while (tmp != NULL) { if (tmp == t) { printk(KERN_DEBUG "timer name: %s data: 0x%08lX already " "in list!\n", name, data); sanity_failed++; goto done; } else tmp = tmp->next; } tmp = fast_timer_list; #endif t->delay_us = delay_us; t->function = function; t->data = data; t->name = name; t->tv_expires.tv_usec = t->tv_set.tv_usec + delay_us % 1000000; t->tv_expires.tv_jiff = t->tv_set.tv_jiff + delay_us / 1000000 / HZ; if (t->tv_expires.tv_usec > 1000000) { t->tv_expires.tv_usec -= 1000000; t->tv_expires.tv_jiff += HZ; } #ifdef FAST_TIMER_LOG timer_added_log[fast_timers_added % NUM_TIMER_STATS] = *t; #endif fast_timers_added++; /* Check if this should timeout before anything else */ if (tmp == NULL || fasttime_cmp(&t->tv_expires, &tmp->tv_expires) < 0) { /* Put first in list and modify the timer value */ t->prev = NULL; t->next = fast_timer_list; if (fast_timer_list) fast_timer_list->prev = t; fast_timer_list = t; #ifdef FAST_TIMER_LOG timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t; #endif start_timer_trig(delay_us); } else { /* Put in correct place in list */ while (tmp->next && fasttime_cmp(&t->tv_expires, &tmp->next->tv_expires) > 0) tmp = tmp->next; /* Insert t after tmp */ t->prev = tmp; t->next = tmp->next; if (tmp->next) { tmp->next->prev = t; } tmp->next = t; } D2(printk("start_one_shot_timer: %d us done\n", delay_us)); done: local_irq_restore(flags); } /* start_one_shot_timer */ static inline int fast_timer_pending (const struct fast_timer * t) { return (t->next != NULL) || (t->prev != NULL) || (t == fast_timer_list); } static inline int detach_fast_timer (struct fast_timer *t) { struct fast_timer *next, *prev; if (!fast_timer_pending(t)) return 0; next = t->next; prev = t->prev; if (next) next->prev = prev; if (prev) prev->next = next; else fast_timer_list = next; fast_timers_deleted++; return 1; } int del_fast_timer(struct fast_timer * t) { unsigned long flags; int ret; local_irq_save(flags); ret = detach_fast_timer(t); t->next = t->prev = 
NULL; local_irq_restore(flags); return ret; } /* del_fast_timer */ /* Interrupt routines or functions called in interrupt context */ /* Timer interrupt handler for trig interrupts */ static irqreturn_t timer_trig_interrupt(int irq, void *dev_id) { reg_timer_r_masked_intr masked_intr; /* Check if the timer interrupt is for us (a trig int) */ masked_intr = REG_RD(timer, regi_timer0, r_masked_intr); if (!masked_intr.trig) return IRQ_NONE; timer_trig_handler(NULL); return IRQ_HANDLED; } static void timer_trig_handler(struct work_struct *work) { reg_timer_rw_ack_intr ack_intr = { 0 }; reg_timer_rw_intr_mask intr_mask; reg_timer_rw_trig_cfg trig_cfg = { 0 }; struct fast_timer *t; unsigned long flags; /* We keep interrupts disabled not only when we modify the * fast timer list, but any time we hold a reference to a * timer in the list, since del_fast_timer may be called * from (another) interrupt context. Thus, the only time * when interrupts are enabled is when calling the timer * callback function. */ local_irq_save(flags); /* Clear timer trig interrupt */ intr_mask = REG_RD(timer, regi_timer0, rw_intr_mask); intr_mask.trig = 0; REG_WR(timer, regi_timer0, rw_intr_mask, intr_mask); /* First stop timer, then ack interrupt */ /* Stop timer */ trig_cfg.tmr = regk_timer_off; REG_WR(timer, regi_timer0, rw_trig_cfg, trig_cfg); /* Ack interrupt */ ack_intr.trig = 1; REG_WR(timer, regi_timer0, rw_ack_intr, ack_intr); fast_timer_running = 0; fast_timer_ints++; fast_timer_function_type *f; unsigned long d; t = fast_timer_list; while (t) { struct fasttime_t tv; /* Has it really expired? 
*/ do_gettimeofday_fast(&tv); D1(printk(KERN_DEBUG "t: %is %06ius\n", tv.tv_jiff, tv.tv_usec)); if (fasttime_cmp(&t->tv_expires, &tv) <= 0) { /* Yes it has expired */ #ifdef FAST_TIMER_LOG timer_expired_log[fast_timers_expired % NUM_TIMER_STATS] = *t; #endif fast_timers_expired++; /* Remove this timer before call, since it may reuse the timer */ if (t->prev) t->prev->next = t->next; else fast_timer_list = t->next; if (t->next) t->next->prev = t->prev; t->prev = NULL; t->next = NULL; /* Save function callback data before enabling * interrupts, since the timer may be removed and we * don't know how it was allocated (e.g. ->function * and ->data may become overwritten after deletion * if the timer was stack-allocated). */ f = t->function; d = t->data; if (f != NULL) { /* Run the callback function with interrupts * enabled. */ local_irq_restore(flags); f(d); local_irq_save(flags); } else DEBUG_LOG("!trimertrig %i function==NULL!\n", fast_timer_ints); } else { /* Timer is to early, let's set it again using the normal routines */ D1(printk(".\n")); } t = fast_timer_list; if (t != NULL) { /* Start next timer.. */ long us = 0; struct fasttime_t tv; do_gettimeofday_fast(&tv); /* time_after_eq takes care of wrapping */ if (time_after_eq(t->tv_expires.tv_jiff, tv.tv_jiff)) us = ((t->tv_expires.tv_jiff - tv.tv_jiff) * 1000000 / HZ + t->tv_expires.tv_usec - tv.tv_usec); if (us > 0) { if (!fast_timer_running) { #ifdef FAST_TIMER_LOG timer_started_log[fast_timers_started % NUM_TIMER_STATS] = *t; #endif start_timer_trig(us); } break; } else { /* Timer already expired, let's handle it better late than never. * The normal loop handles it */ D1(printk("e! 
%d\n", us)); } } } local_irq_restore(flags); if (!t) D1(printk("ttrig stop!\n")); } static void wake_up_func(unsigned long data) { wait_queue_head_t *sleep_wait_p = (wait_queue_head_t*)data; wake_up(sleep_wait_p); } /* Useful API */ void schedule_usleep(unsigned long us) { struct fast_timer t; wait_queue_head_t sleep_wait; init_waitqueue_head(&sleep_wait); D1(printk("schedule_usleep(%d)\n", us)); start_one_shot_timer(&t, wake_up_func, (unsigned long)&sleep_wait, us, "usleep"); /* Uninterruptible sleep on the fast timer. (The condition is * somewhat redundant since the timer is what wakes us up.) */ wait_event(sleep_wait, !fast_timer_pending(&t)); D1(printk("done schedule_usleep(%d)\n", us)); } #ifdef CONFIG_PROC_FS static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len ,int *eof, void *data_unused); static struct proc_dir_entry *fasttimer_proc_entry; #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_PROC_FS /* This value is very much based on testing */ #define BIG_BUF_SIZE (500 + NUM_TIMER_STATS * 300) static int proc_fasttimer_read(char *buf, char **start, off_t offset, int len ,int *eof, void *data_unused) { unsigned long flags; int i = 0; int num_to_show; struct fasttime_t tv; struct fast_timer *t, *nextt; static char *bigbuf = NULL; static unsigned long used; if (!bigbuf) { bigbuf = vmalloc(BIG_BUF_SIZE); if (!bigbuf) { used = 0; if (buf) buf[0] = '\0'; return 0; } } if (!offset || !used) { do_gettimeofday_fast(&tv); used = 0; used += sprintf(bigbuf + used, "Fast timers added: %i\n", fast_timers_added); used += sprintf(bigbuf + used, "Fast timers started: %i\n", fast_timers_started); used += sprintf(bigbuf + used, "Fast timer interrupts: %i\n", fast_timer_ints); used += sprintf(bigbuf + used, "Fast timers expired: %i\n", fast_timers_expired); used += sprintf(bigbuf + used, "Fast timers deleted: %i\n", fast_timers_deleted); used += sprintf(bigbuf + used, "Fast timer running: %s\n", fast_timer_running ? 
"yes" : "no"); used += sprintf(bigbuf + used, "Current time: %lu.%06lu\n", (unsigned long)tv.tv_jiff, (unsigned long)tv.tv_usec); #ifdef FAST_TIMER_SANITY_CHECKS used += sprintf(bigbuf + used, "Sanity failed: %i\n", sanity_failed); #endif used += sprintf(bigbuf + used, "\n"); #ifdef DEBUG_LOG_INCLUDED { int end_i = debug_log_cnt; i = 0; if (debug_log_cnt_wrapped) i = debug_log_cnt; while ((i != end_i || (debug_log_cnt_wrapped && !used)) && used+100 < BIG_BUF_SIZE) { used += sprintf(bigbuf + used, debug_log_string[i], debug_log_value[i]); i = (i+1) % DEBUG_LOG_MAX; } } used += sprintf(bigbuf + used, "\n"); #endif num_to_show = (fast_timers_started < NUM_TIMER_STATS ? fast_timers_started: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers started: %i\n", fast_timers_started); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE) ; i++) { int cur = (fast_timers_started - i - 1) % NUM_TIMER_STATS; #if 1 //ndef FAST_TIMER_LOG used += sprintf(bigbuf + used, "div: %i delay: %i" "\n", timer_div_settings[cur], timer_delay_settings[cur] ); #endif #ifdef FAST_TIMER_LOG t = &timer_started_log[cur]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); #endif } used += sprintf(bigbuf + used, "\n"); #ifdef FAST_TIMER_LOG num_to_show = (fast_timers_added < NUM_TIMER_STATS ? 
fast_timers_added: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers added: %i\n", fast_timers_added); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++) { t = &timer_added_log[(fast_timers_added - i - 1) % NUM_TIMER_STATS]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); } used += sprintf(bigbuf + used, "\n"); num_to_show = (fast_timers_expired < NUM_TIMER_STATS ? fast_timers_expired: NUM_TIMER_STATS); used += sprintf(bigbuf + used, "Timers expired: %i\n", fast_timers_expired); for (i = 0; i < num_to_show && (used+100 < BIG_BUF_SIZE); i++) { t = &timer_expired_log[(fast_timers_expired - i - 1) % NUM_TIMER_STATS]; used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data ); } used += sprintf(bigbuf + used, "\n"); #endif used += sprintf(bigbuf + used, "Active timers:\n"); local_irq_save(flags); t = fast_timer_list; while (t != NULL && (used+100 < BIG_BUF_SIZE)) { nextt = t->next; local_irq_restore(flags); used += sprintf(bigbuf + used, "%-14s s: %6lu.%06lu e: %6lu.%06lu " "d: %6li us data: 0x%08lX" /* " func: 0x%08lX" */ "\n", t->name, (unsigned long)t->tv_set.tv_jiff, (unsigned long)t->tv_set.tv_usec, (unsigned long)t->tv_expires.tv_jiff, (unsigned long)t->tv_expires.tv_usec, t->delay_us, t->data /* , t->function */ ); local_irq_save(flags); if (t->next != nextt) { printk("timer removed!\n"); } t = nextt; } local_irq_restore(flags); } if (used - offset < len) { len = used - offset; } memcpy(buf, bigbuf + offset, len); *start = buf; *eof = 1; return len; } #endif /* PROC_FS */ #ifdef FAST_TIMER_TEST static 
volatile unsigned long i = 0; static volatile int num_test_timeout = 0; static struct fast_timer tr[10]; static int exp_num[10]; static struct fasttime_t tv_exp[100]; static void test_timeout(unsigned long data) { do_gettimeofday_fast(&tv_exp[data]); exp_num[data] = num_test_timeout; num_test_timeout++; } static void test_timeout1(unsigned long data) { do_gettimeofday_fast(&tv_exp[data]); exp_num[data] = num_test_timeout; if (data < 7) { start_one_shot_timer(&tr[i], test_timeout1, i, 1000, "timeout1"); i++; } num_test_timeout++; } DP( static char buf0[2000]; static char buf1[2000]; static char buf2[2000]; static char buf3[2000]; static char buf4[2000]; ); static char buf5[6000]; static int j_u[1000]; static void fast_timer_test(void) { int prev_num; int j; struct fasttime_t tv, tv0, tv1, tv2; printk("fast_timer_test() start\n"); do_gettimeofday_fast(&tv); for (j = 0; j < 1000; j++) { j_u[j] = GET_JIFFIES_USEC(); } for (j = 0; j < 100; j++) { do_gettimeofday_fast(&tv_exp[j]); } printk(KERN_DEBUG "fast_timer_test() %is %06i\n", tv.tv_jiff, tv.tv_usec); for (j = 0; j < 1000; j++) { printk(KERN_DEBUG "%i %i %i %i %i\n", j_u[j], j_u[j+1], j_u[j+2], j_u[j+3], j_u[j+4]); j += 4; } for (j = 0; j < 100; j++) { printk(KERN_DEBUG "%i.%i %i.%i %i.%i %i.%i %i.%i\n", tv_exp[j].tv_jiff, tv_exp[j].tv_usec, tv_exp[j+1].tv_jiff, tv_exp[j+1].tv_usec, tv_exp[j+2].tv_jiff, tv_exp[j+2].tv_usec, tv_exp[j+3].tv_jiff, tv_exp[j+3].tv_usec, tv_exp[j+4].tv_jiff, tv_exp[j+4].tv_usec); j += 4; } do_gettimeofday_fast(&tv0); start_one_shot_timer(&tr[i], test_timeout, i, 50000, "test0"); DP(proc_fasttimer_read(buf0, NULL, 0, 0, 0)); i++; start_one_shot_timer(&tr[i], test_timeout, i, 70000, "test1"); DP(proc_fasttimer_read(buf1, NULL, 0, 0, 0)); i++; start_one_shot_timer(&tr[i], test_timeout, i, 40000, "test2"); DP(proc_fasttimer_read(buf2, NULL, 0, 0, 0)); i++; start_one_shot_timer(&tr[i], test_timeout, i, 60000, "test3"); DP(proc_fasttimer_read(buf3, NULL, 0, 0, 0)); i++; 
start_one_shot_timer(&tr[i], test_timeout1, i, 55000, "test4xx"); DP(proc_fasttimer_read(buf4, NULL, 0, 0, 0)); i++; do_gettimeofday_fast(&tv1); proc_fasttimer_read(buf5, NULL, 0, 0, 0); prev_num = num_test_timeout; while (num_test_timeout < i) { if (num_test_timeout != prev_num) prev_num = num_test_timeout; } do_gettimeofday_fast(&tv2); printk(KERN_INFO "Timers started %is %06i\n", tv0.tv_jiff, tv0.tv_usec); printk(KERN_INFO "Timers started at %is %06i\n", tv1.tv_jiff, tv1.tv_usec); printk(KERN_INFO "Timers done %is %06i\n", tv2.tv_jiff, tv2.tv_usec); DP(printk("buf0:\n"); printk(buf0); printk("buf1:\n"); printk(buf1); printk("buf2:\n"); printk(buf2); printk("buf3:\n"); printk(buf3); printk("buf4:\n"); printk(buf4); ); printk("buf5:\n"); printk(buf5); printk("timers set:\n"); for(j = 0; j<i; j++) { struct fast_timer *t = &tr[j]; printk("%-10s set: %6is %06ius exp: %6is %06ius " "data: 0x%08X func: 0x%08X\n", t->name, t->tv_set.tv_jiff, t->tv_set.tv_usec, t->tv_expires.tv_jiff, t->tv_expires.tv_usec, t->data, t->function ); printk(" del: %6ius did exp: %6is %06ius as #%i error: %6li\n", t->delay_us, tv_exp[j].tv_jiff, tv_exp[j].tv_usec, exp_num[j], (tv_exp[j].tv_jiff - t->tv_expires.tv_jiff) * 1000000 + tv_exp[j].tv_usec - t->tv_expires.tv_usec); } proc_fasttimer_read(buf5, NULL, 0, 0, 0); printk("buf5 after all done:\n"); printk(buf5); printk("fast_timer_test() done\n"); } #endif int fast_timer_init(void) { /* For some reason, request_irq() hangs when called froom time_init() */ if (!fast_timer_is_init) { printk("fast_timer_init()\n"); #ifdef CONFIG_PROC_FS fasttimer_proc_entry = create_proc_entry("fasttimer", 0, 0); if (fasttimer_proc_entry) fasttimer_proc_entry->read_proc = proc_fasttimer_read; #endif /* PROC_FS */ if (request_irq(TIMER0_INTR_VECT, timer_trig_interrupt, IRQF_SHARED | IRQF_DISABLED, "fast timer int", &fast_timer_list)) printk(KERN_ERR "err: fasttimer irq\n"); fast_timer_is_init = 1; #ifdef FAST_TIMER_TEST printk("do test\n"); fast_timer_test(); 
#endif } return 0; } __initcall(fast_timer_init);
gpl-2.0
dexter93/kernel_htc_msm8660
drivers/scsi/sun3_NCR5380.c
5280
93465
/* sun3_NCR5380.c -- adapted from atari_NCR5380.c for the sun3 by Sam Creasey. */ /* * NCR 5380 generic driver routines. These should make it *trivial* * to implement 5380 SCSI drivers under Linux with a non-trantor * architecture. * * Note that these routines also work with NR53c400 family chips. * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 666-5836 * * DISTRIBUTION RELEASE 6. * * For more information, please consult * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * ++roman: To port the 5380 driver to the Atari, I had to do some changes in * this file, too: * * - Some of the debug statements were incorrect (undefined variables and the * like). I fixed that. * * - In information_transfer(), I think a #ifdef was wrong. Looking at the * possible DMA transfer size should also happen for REAL_DMA. I added this * in the #if statement. * * - When using real DMA, information_transfer() should return in a DATAOUT * phase after starting the DMA. It has nothing more to do. * * - The interrupt service routine should run main after end of DMA, too (not * only after RESELECTION interrupts). Additionally, it should _not_ test * for more interrupts after running main, since a DMA process may have * been started and interrupts are turned on now. The new int could happen * inside the execution of NCR5380_intr(), leading to recursive * calls. * * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA * and USLEEP, because these were messing up readability and will never be * needed for Atari SCSI. * * - I've revised the NCR5380_main() calling scheme (relax the 'main_running' * stuff), and 'main' is executed in a bottom half if awoken by an * interrupt. * * - The code was quite cluttered up by "#if (NDEBUG & NDEBUG_*) printk..." 
* constructs. In my eyes, this made the source rather unreadable, so I * finally replaced that by the *_PRINTK() macros. * */ #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> /* * Further development / testing that should be done : * 1. Test linked command handling code after Eric is ready with * the high level code. */ #if (NDEBUG & NDEBUG_LISTS) #define LIST(x,y) \ { printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); \ if ((x)==(y)) udelay(5); } #define REMOVE(w,x,y,z) \ { printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, \ (void*)(w), (void*)(x), (void*)(y), (void*)(z)); \ if ((x)==(y)) udelay(5); } #else #define LIST(x,y) #define REMOVE(w,x,y,z) #endif #ifndef notyet #undef LINKED #endif /* * Design * Issues : * * The other Linux SCSI drivers were written when Linux was Intel PC-only, * and specifically for each board rather than each chip. This makes their * adaptation to platforms like the Mac (Some of which use NCR5380's) * more difficult than it has to be. * * Also, many of the SCSI drivers were written before the command queuing * routines were implemented, meaning their implementations of queued * commands were hacked on rather than designed in from the start. * * When I designed the Linux SCSI drivers I figured that * while having two different SCSI boards in a system might be useful * for debugging things, two of the same type wouldn't be used. * Well, I was wrong and a number of users have mailed me about running * multiple high-performance SCSI boards in a server. * * Finally, when I get questions from users, I have no idea what * revision of my driver they are running. * * This driver attempts to address these problems : * This is a generic 5380 driver. To use it on a different platform, * one simply writes appropriate system specific macros (ie, data * transfer - some PC's will use the I/O bus, 68K's must use * memory mapped) and drops this file in their 'C' wrapper. 
* * As far as command queueing, two queues are maintained for * each 5380 in the system - commands that haven't been issued yet, * and commands that are currently executing. This means that an * unlimited number of commands may be queued, letting * more commands propagate from the higher driver levels giving higher * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported, * allowing multiple commands to propagate all the way to a SCSI-II device * while a command is already executing. * * To solve the multiple-boards-in-the-same-system problem, * there is a separate instance structure for each instance * of a 5380 in the system. So, multiple NCR5380 drivers will * be able to coexist with appropriate changes to the high level * SCSI code. * * A NCR5380_PUBLIC_REVISION macro is provided, with the release * number (updated for each public release) printed by the * NCR5380_print_options command, which should be called from the * wrapper detect function, so that I know what release of the driver * users are using. * * Issues specific to the NCR5380 : * * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead * piece of hardware that requires you to sit in a loop polling for * the REQ signal as long as you are connected. Some devices are * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect * while doing long seek operations. * * The workaround for this is to keep track of devices that have * disconnected. If the device hasn't disconnected, for commands that * should disconnect, we do something like * * while (!REQ is asserted) { sleep for N usecs; poll for M usecs } * * Some tweaking of N and M needs to be done. An algorithm based * on "time to data" would give the best results as long as short time * to datas (ie, on the same track) were considered, however these * broken devices are the exception rather than the rule and I'd rather * spend my time optimizing for the normal case. 
* * Architecture : * * At the heart of the design is a coroutine, NCR5380_main, * which is started when not running by the interrupt handler, * timer, and queue command function. It attempts to establish * I_T_L or I_T_L_Q nexuses by removing the commands from the * issue queue and calling NCR5380_select() if a nexus * is not established. * * Once a nexus is established, the NCR5380_information_transfer() * phase goes through the various phases as instructed by the target. * if the target goes into MSG IN and sends a DISCONNECT message, * the command structure is placed into the per instance disconnected * queue, and NCR5380_main tries to find more work. If USLEEP * was defined, and the target is idle for too long, the system * will try to sleep. * * If a command has disconnected, eventually an interrupt will trigger, * calling NCR5380_intr() which will in turn call NCR5380_reselect * to reestablish a nexus. This will run main if necessary. * * On command termination, the done function will be called as * appropriate. * * SCSI pointers are maintained in the SCp field of SCSI command * structures, being initialized after the command is connected * in NCR5380_select, and set as appropriate in NCR5380_information_transfer. * Note that in violation of the standard, an implicit SAVE POINTERS operation * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS. */ /* * Using this file : * This file a skeleton Linux SCSI driver for the NCR 5380 series * of chips. To use it, you write an architecture specific functions * and macros and include this file in your driver. * * These macros control options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * * LINKED - if defined, linked commands are supported. * * REAL_DMA - if defined, REAL DMA is used during the data transfer phases. 
 *
 * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible
 *
 * These macros MUST be defined :
 *
 * NCR5380_read(register) - read from the specified register
 *
 * NCR5380_write(register, value) - write to the specific register
 *
 * Either real DMA *or* pseudo DMA may be implemented
 * REAL functions :
 * NCR5380_REAL_DMA should be defined if real DMA is to be used.
 * Note that the DMA setup functions should return the number of bytes
 * that they were able to program the controller for.
 *
 * Also note that generic i386/PC versions of these macros are
 * available as NCR5380_i386_dma_write_setup,
 * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
 *
 * NCR5380_dma_write_setup(instance, src, count) - initialize
 * NCR5380_dma_read_setup(instance, dst, count) - initialize
 * NCR5380_dma_residual(instance); - residual count
 *
 * PSEUDO functions :
 * NCR5380_pwrite(instance, src, count)
 * NCR5380_pread(instance, dst, count);
 *
 * If nothing specific to this implementation needs doing (ie, with external
 * hardware), you must also define
 *
 * NCR5380_queue_command
 * NCR5380_reset
 * NCR5380_abort
 * NCR5380_proc_info
 *
 * to be the global entry points into the specific driver, ie
 * #define NCR5380_queue_command t128_queue_command.
 *
 * If this is not done, the routines will be defined as static functions
 * with the NCR5380* names and the user must provide a globally
 * accessible wrapper function.
 *
 * The generic driver is initialized by calling NCR5380_init(instance),
 * after setting the appropriate host specific fields and ID. If the
 * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
 * possible) function may be used. Before the specific driver initialization
 * code finishes, NCR5380_print_options should be called.
 */

/* First registered host instance; NCR5380_main()/NCR5380_intr() operate
 * on this one (single-adapter assumption in this version of the core). */
static struct Scsi_Host *first_instance = NULL;
static struct scsi_host_template *the_template = NULL;

/* Macros ease life... :-) */
#define SETUP_HOSTDATA(in)			\
    struct NCR5380_hostdata *hostdata =		\
	(struct NCR5380_hostdata *)(in)->hostdata
#define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)

/* The per-command queue link is stashed in cmd->host_scribble. */
#define NEXT(cmd) ((struct scsi_cmnd *)(cmd)->host_scribble)
#define SET_NEXT(cmd, next) ((cmd)->host_scribble = (void *)(next))
#define NEXTADDR(cmd) ((struct scsi_cmnd **)&((cmd)->host_scribble))

#define HOSTNO instance->host_no
#define H_NO(cmd) (cmd)->device->host->host_no

#define SGADDR(buffer) (void *)(((unsigned long)sg_virt(((buffer)))))

#ifdef SUPPORT_TAGS

/*
 * Functions for handling tagged queuing
 * =====================================
 *
 * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes:
 *
 * Using consecutive numbers for the tags is no good idea in my eyes. There
 * could be wrong re-usings if the counter (8 bit!) wraps and some early
 * command has been preempted for a long time. My solution: a bitfield for
 * remembering used tags.
 *
 * There's also the problem that each target has a certain queue size, but we
 * cannot know it in advance :-( We just see a QUEUE_FULL status being
 * returned. So, in this case, the driver internal queue size assumption is
 * reduced to the number of active tags if QUEUE_FULL is returned by the
 * target. The command is returned to the mid-level, but with status changed
 * to BUSY, since --as I've seen-- the mid-level can't handle QUEUE_FULL
 * correctly.
 *
 * We're also not allowed running tagged commands as long as an untagged
 * command is active. And REQUEST SENSE commands after a contingent allegiance
 * condition _must_ be untagged. To keep track whether an untagged command has
 * been issued, the host->busy array is still employed, as it is without
 * support for tagged queuing.
 *
 * One could suspect that there are possible race conditions between
 * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
 * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
 * which already guaranteed to be running at most once. It is also the only
 * place where tags/LUNs are allocated. So no other allocation can slip
 * between that pair, there could only happen a reselection, which can free a
 * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() becomes
 * important: the tag bit must be cleared before 'nr_allocated' is decreased.
 */

/* -1 for TAG_NONE is not possible with unsigned char cmd->tag */
#undef TAG_NONE
#define TAG_NONE 0xff

/* For the m68k, the number of bits in 'allocated' must be a multiple of 32! */
#if (MAX_TAGS % 32) != 0
#error "MAX_TAGS must be a multiple of 32!"
#endif

/* Per-(target,LUN) tag bookkeeping: a bitmap of tags in use, the count
 * of allocated tags, and the current queue-depth assumption (shrunk on
 * QUEUE_FULL, per the note above). */
typedef struct {
	char allocated[MAX_TAGS/8];
	int nr_allocated;
	int queue_size;
} TAG_ALLOC;

static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */


/* Reset all tag state; queue_size starts optimistic at MAX_TAGS and is
 * only lowered later when a target reports QUEUE_FULL. */
static void __init init_tags( void )
{
	int target, lun;
	TAG_ALLOC *ta;

	if (!setup_use_tagged_queuing)
		return;

	for( target = 0; target < 8; ++target ) {
		for( lun = 0; lun < 8; ++lun ) {
			ta = &TagAlloc[target][lun];
			memset( &ta->allocated, 0, MAX_TAGS/8 );
			ta->nr_allocated = 0;
			/* At the beginning, assume the maximum queue size we could
			 * support (MAX_TAGS). This value will be decreased if the target
			 * returns QUEUE_FULL status.
			 */
			ta->queue_size = MAX_TAGS;
		}
	}
}


/* Check if we can issue a command to this LUN: First see if the LUN is marked
 * busy by an untagged command. If the command should use tagged queuing, also
 * check that there is a free tag and the target's queue won't overflow. This
 * function should be called with interrupts disabled to avoid race
 * conditions.
 */

/* Returns 1 if the command must wait (LUN reserved by an untagged
 * command, or tag pool for this target/LUN exhausted), 0 if it may be
 * issued now.  Call with interrupts disabled. */
static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
{
	SETUP_HOSTDATA(cmd->device->host);

	if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))
		return( 1 );
	if (!should_be_tagged ||
	    !setup_use_tagged_queuing || !cmd->device->tagged_supported)
		return( 0 );
	if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
	    TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) {
		TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n",
			    H_NO(cmd), cmd->device->id, cmd->device->lun );
		return( 1 );
	}
	return( 0 );
}


/* Allocate a tag for a command (there are no checks anymore, check_lun_busy()
 * must be called before!), or reserve the LUN in 'busy' if the command is
 * untagged.  Sets cmd->tag to the allocated tag, or TAG_NONE for untagged. */
static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
{
	SETUP_HOSTDATA(cmd->device->host);

	/* If we or the target don't support tagged queuing, allocate the LUN for
	 * an untagged command.
	 */
	if (!should_be_tagged ||
	    !setup_use_tagged_queuing || !cmd->device->tagged_supported) {
		cmd->tag = TAG_NONE;
		hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
		TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged "
			    "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun );
	}
	else {
		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];

		cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS );
		set_bit( cmd->tag, &ta->allocated );
		ta->nr_allocated++;
		TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d "
			    "(now %d tags in use)\n",
			    H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun,
			    ta->nr_allocated );
	}
}


/* Mark the tag of command 'cmd' as free, or in case of an untagged command,
 * unlock the LUN.
 */
static void cmd_free_tag(struct scsi_cmnd *cmd)
{
	SETUP_HOSTDATA(cmd->device->host);

	if (cmd->tag == TAG_NONE) {
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n",
			    H_NO(cmd), cmd->device->id, cmd->device->lun );
	}
	else if (cmd->tag >= MAX_TAGS) {
		printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
		       H_NO(cmd), cmd->tag );
	}
	else {
		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
		/* Ordering matters: clear the bitmap bit BEFORE decrementing
		 * nr_allocated (see the race discussion in the header above). */
		clear_bit( cmd->tag, &ta->allocated );
		ta->nr_allocated--;
		TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n",
			    H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );
	}
}


/* Drop every allocated tag for every target/LUN; used to wipe all tag
 * state at once (e.g. after a bus reset). */
static void free_all_tags( void )
{
	int target, lun;
	TAG_ALLOC *ta;

	if (!setup_use_tagged_queuing)
		return;

	for( target = 0; target < 8; ++target ) {
		for( lun = 0; lun < 8; ++lun ) {
			ta = &TagAlloc[target][lun];
			memset( &ta->allocated, 0, MAX_TAGS/8 );
			ta->nr_allocated = 0;
		}
	}
}

#endif /* SUPPORT_TAGS */


/*
 * Function : void initialize_SCp(struct scsi_cmnd *cmd)
 *
 * Purpose : initialize the saved data pointers for cmd to point to the
 *	start of the buffer.
 *
 * Inputs : cmd - struct scsi_cmnd structure to have pointers reset.
 */
static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
{
	/*
	 * Initialize the Scsi Pointer field so that all of the commands in the
	 * various queues are valid.
	 */

	if (scsi_bufflen(cmd)) {
		cmd->SCp.buffer = scsi_sglist(cmd);
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
		cmd->SCp.ptr = (char *) SGADDR(cmd->SCp.buffer);
		cmd->SCp.this_residual = cmd->SCp.buffer->length;
	} else {
		/* No data transfer for this command. */
		cmd->SCp.buffer = NULL;
		cmd->SCp.buffers_residual = 0;
		cmd->SCp.ptr = NULL;
		cmd->SCp.this_residual = 0;
	}
}

#include <linux/delay.h>

#if 1

/* Name tables used by the debug dump routines below to decode the
 * status / bus-and-status / initiator-command / mode register bits. */
static struct {
	unsigned char mask;
	const char * name;}
signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
	{ SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
	{ SR_SEL, "SEL" }, {0, NULL}},
basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}},
icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
	{ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
	{ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
	{0, NULL}},
mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
	"MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
	{MR_MONITOR_BSY, "MODE MONITOR BSY"}, {MR_DMA_MODE, "MODE DMA"},
	{MR_ARBITRATE, "MODE ARBITRATION"}, {0, NULL}};

/*
 * Function : void NCR5380_print(struct Scsi_Host *instance)
 *
 * Purpose : print the SCSI bus signals for debugging purposes
 *
 * Input : instance - which NCR5380
 */
static void NCR5380_print(struct Scsi_Host *instance)
{
	unsigned char status, data, basr, mr, icr, i;
	unsigned long flags;

	/* Snapshot all registers atomically so the dump is consistent. */
	local_irq_save(flags);
	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
	status = NCR5380_read(STATUS_REG);
	mr = NCR5380_read(MODE_REG);
	icr = NCR5380_read(INITIATOR_COMMAND_REG);
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	local_irq_restore(flags);
	printk("STATUS_REG: %02x ", status);
	for (i = 0; signals[i].mask ; ++i)
		if (status & signals[i].mask)
			printk(",%s", signals[i].name);
	printk("\nBASR: %02x ", basr);
	for (i = 0; basrs[i].mask ; ++i)
		if (basr & basrs[i].mask)
			printk(",%s", basrs[i].name);
	printk("\nICR: %02x ", icr);
	for (i = 0; icrs[i].mask; ++i)
		if (icr & icrs[i].mask)
			printk(",%s", icrs[i].name);
	printk("\nMODE: %02x ", mr);
	for (i = 0; mrs[i].mask; ++i)
		if (mr & mrs[i].mask)
			printk(",%s", mrs[i].name);
	printk("\n");
}

/* Phase decode table; PHASE_UNKNOWN terminates the search below. */
static struct {
	unsigned char value;
	const char *name;
} phases[] = {
	{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"},
	{PHASE_CMDOUT, "CMDOUT"}, {PHASE_STATIN, "STATIN"},
	{PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
	{PHASE_UNKNOWN, "UNKNOWN"}};

/*
 * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
 *
 * Purpose : print the current SCSI phase for debugging purposes
 *
 * Input : instance - which NCR5380
 */
static void NCR5380_print_phase(struct Scsi_Host *instance)
{
	unsigned char status;
	int i;

	status = NCR5380_read(STATUS_REG);
	if (!(status & SR_REQ))
		printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
	else {
		/* Walk the table until the phase matches or we hit UNKNOWN. */
		for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
		     (phases[i].value != (status & PHASE_MASK)); ++i);
		printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
	}
}

#else /* !NDEBUG */

/* dummies... */
__inline__ void NCR5380_print(struct Scsi_Host *instance) { };
__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };

#endif

/*
 * ++roman: New scheme of calling NCR5380_main()
 *
 * If we're not in an interrupt, we can call our main directly, it cannot be
 * already running. Else, we queue it on a task queue, if not 'main_running'
 * tells us that a lower level is already executing it. This way,
 * 'main_running' needs not be protected in a special way.
 *
 * queue_main() is a utility function for putting our main onto the task
 * queue, if main_running is false. It should be called only from a
 * interrupt or bottom half.
 */

#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>

/* Nonzero while NCR5380_main() is executing; read/written without a lock
 * under the calling scheme described above. */
static volatile int main_running = 0;
static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);

/* Schedule NCR5380_main() from interrupt/bottom-half context unless it
 * is already running -- the running instance will notice new work. */
static __inline__ void queue_main(void)
{
	if (!main_running) {
		/* If in interrupt and NCR5380_main() not already running,
		   queue it on the 'immediate' task queue, to be processed
		   immediately after the current interrupt processing has
		   finished. */
		schedule_work(&NCR5380_tqueue);
	}
	/* else: nothing to do: the running NCR5380_main() will pick up
	   any newly queued command. */
}


/* One-time global driver init hook; currently only logs the first call. */
static inline void NCR5380_all_init (void)
{
	static int done = 0;
	if (!done) {
		INI_PRINTK("scsi : NCR5380_all_init()\n");
		done = 1;
	}
}


/*
 * Function : void NCR58380_print_options (struct Scsi_Host *instance)
 *
 * Purpose : called by probe code indicating the NCR5380 driver
 *	options that were selected.
 *
 * Inputs : instance, pointer to this instance. Unused.
 */
static void __init NCR5380_print_options (struct Scsi_Host *instance)
{
	printk(" generic options"
#ifdef AUTOSENSE
	       " AUTOSENSE"
#endif
#ifdef REAL_DMA
	       " REAL DMA"
#endif
#ifdef PARITY
	       " PARITY"
#endif
#ifdef SUPPORT_TAGS
	       " SCSI-2 TAGGED QUEUING"
#endif
	       );
	printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
}

/*
 * Function : void NCR5380_print_status (struct Scsi_Host *instance)
 *
 * Purpose : print commands in the various queues, called from
 *	NCR5380_abort and NCR5380_debug to aid debugging.
 *
 * Inputs : instance, pointer to this instance.
*/ static void NCR5380_print_status (struct Scsi_Host *instance) { char *pr_bfr; char *start; int len; NCR_PRINT(NDEBUG_ANY); NCR_PRINT_PHASE(NDEBUG_ANY); pr_bfr = (char *) __get_free_page(GFP_ATOMIC); if (!pr_bfr) { printk("NCR5380_print_status: no memory for print buffer\n"); return; } len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0); pr_bfr[len] = 0; printk("\n%s\n", pr_bfr); free_page((unsigned long) pr_bfr); } /******************************************/ /* * /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED] * * *buffer: I/O buffer * **start: if inout == FALSE pointer into buffer where user read should start * offset: current offset * length: length of buffer * hostno: Scsi_Host host_no * inout: TRUE - user is writing; FALSE - user is reading * * Return the number of bytes read from or written */ #undef SPRINTF #define SPRINTF(fmt,args...) \ do { if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \ pos += sprintf(pos, fmt , ## args); } while(0) static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer, int length); static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start, off_t offset, int length, int inout) { char *pos = buffer; struct NCR5380_hostdata *hostdata; struct scsi_cmnd *ptr; unsigned long flags; off_t begin = 0; #define check_offset() \ do { \ if (pos - buffer < offset - begin) { \ begin += pos - buffer; \ pos = buffer; \ } \ } while (0) hostdata = (struct NCR5380_hostdata *)instance->hostdata; if (inout) { /* Has data been written to the file ? */ return(-ENOSYS); /* Currently this is a no-op */ } SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); check_offset(); local_irq_save(flags); SPRINTF("NCR5380: coroutine is%s running.\n", main_running ? 
"" : "n't"); check_offset(); if (!hostdata->connected) SPRINTF("scsi%d: no currently connected command\n", HOSTNO); else pos = lprint_Scsi_Cmnd ((struct scsi_cmnd *) hostdata->connected, pos, buffer, length); SPRINTF("scsi%d: issue_queue\n", HOSTNO); check_offset(); for (ptr = (struct scsi_cmnd *) hostdata->issue_queue; ptr; ptr = NEXT(ptr)) { pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length); check_offset(); } SPRINTF("scsi%d: disconnected_queue\n", HOSTNO); check_offset(); for (ptr = (struct scsi_cmnd *) hostdata->disconnected_queue; ptr; ptr = NEXT(ptr)) { pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length); check_offset(); } local_irq_restore(flags); *start = buffer + (offset - begin); if (pos - buffer < offset - begin) return 0; else if (pos - buffer - (offset - begin) < length) return pos - buffer - (offset - begin); return length; } static char *lprint_Scsi_Cmnd(struct scsi_cmnd *cmd, char *pos, char *buffer, int length) { int i, s; unsigned char *command; SPRINTF("scsi%d: destination target %d, lun %d\n", H_NO(cmd), cmd->device->id, cmd->device->lun); SPRINTF(" command = "); command = cmd->cmnd; SPRINTF("%2d (0x%02x)", command[0], command[0]); for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) SPRINTF(" %02x", command[i]); SPRINTF("\n"); return pos; } /* * Function : void NCR5380_init (struct Scsi_Host *instance) * * Purpose : initializes *instance and corresponding 5380 chip. * * Inputs : instance - instantiation of the 5380 driver. * * Notes : I assume that the host, hostno, and id bits have been * set correctly. I don't care about the irq and other fields. 
 *
 */

static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
{
	int i;
	SETUP_HOSTDATA(instance);

	NCR5380_all_init();

	hostdata->aborted = 0;
	hostdata->id_mask = 1 << instance->this_id;
	/* id_higher_mask: all IDs with higher arbitration priority than us. */
	hostdata->id_higher_mask = 0;
	for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
		if (i > hostdata->id_mask)
			hostdata->id_higher_mask |= i;
	for (i = 0; i < 8; ++i)
		hostdata->busy[i] = 0;
#ifdef SUPPORT_TAGS
	init_tags();
#endif
#if defined (REAL_DMA)
	hostdata->dma_len = 0;
#endif
	hostdata->targets_present = 0;
	hostdata->connected = NULL;
	hostdata->issue_queue = NULL;
	hostdata->disconnected_queue = NULL;
	hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT;

	/* Remember the first registered instance; NCR5380_main() and
	 * NCR5380_intr() operate on it. */
	if (!the_template) {
		the_template = instance->hostt;
		first_instance = instance;
	}

#ifndef AUTOSENSE
	if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1))
		printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n"
		       " without AUTOSENSE option, contingent allegiance conditions may\n"
		       " be incorrectly cleared.\n", HOSTNO);
#endif /* def AUTOSENSE */

	/* Put the chip into a known quiescent initiator state. */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(TARGET_COMMAND_REG, 0);
	NCR5380_write(SELECT_ENABLE_REG, 0);

	return 0;
}

static void NCR5380_exit(struct Scsi_Host *instance)
{
	/* Empty, as we didn't schedule any delayed work */
}

/*
 * Function : int NCR5380_queue_command (struct scsi_cmnd *cmd,
 *	void (*done)(struct scsi_cmnd *))
 *
 * Purpose : enqueues a SCSI command
 *
 * Inputs : cmd - SCSI command, done - function called on completion, with
 *	a pointer to the command descriptor.
 *
 * Returns : 0
 *
 * Side effects :
 *	cmd is added to the per instance issue_queue, with minor
 *	twiddling done to the host specific fields of cmd. If the
 *	main coroutine is not running, it is restarted.
 *
 */

/* Only make static if a wrapper function is used */
static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
				     void (*done)(struct scsi_cmnd *))
{
	SETUP_HOSTDATA(cmd->device->host);
	struct scsi_cmnd *tmp;
	unsigned long flags;

#if (NDEBUG & NDEBUG_NO_WRITE)
	/* Debug mode: refuse all WRITEs outright. */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
	case WRITE_10:
		printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n",
		       H_NO(cmd));
		cmd->result = (DID_ERROR << 16);
		done(cmd);
		return 0;
	}
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */

#ifdef NCR5380_STATS
# if 0
	if (!hostdata->connected && !hostdata->issue_queue &&
	    !hostdata->disconnected_queue) {
		hostdata->timebase = jiffies;
	}
# endif
# ifdef NCR5380_STAT_LIMIT
	if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
		/* Start of accounting; collect_stats() adds the completion
		 * time back in when the command finishes. */
		switch (cmd->cmnd[0]) {
		case WRITE:
		case WRITE_6:
		case WRITE_10:
			hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
			hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
			hostdata->pendingw++;
			break;
		case READ:
		case READ_6:
		case READ_10:
			hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
			hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
			hostdata->pendingr++;
			break;
		}
#endif

	/*
	 * We use the host_scribble field as a pointer to the next command
	 * in a queue
	 */

	SET_NEXT(cmd, NULL);
	cmd->scsi_done = done;

	cmd->result = 0;

	/*
	 * Insert the cmd into the issue queue. Note that REQUEST SENSE
	 * commands are added to the head of the queue since any command will
	 * clear the contingent allegiance condition that exists and the
	 * sense data is only guaranteed to be valid while the condition exists.
	 */

	local_irq_save(flags);
	/* ++guenther: now that the issue queue is being set up, we can lock ST-DMA.
	 * Otherwise a running NCR5380_main may steal the lock.
	 * Lock before actually inserting due to fairness reasons explained in
	 * atari_scsi.c. If we insert first, then it's impossible for this driver
	 * to release the lock.
	 * Stop timer for this command while waiting for the lock, or timeouts
	 * may happen (and they really do), and it's no good if the command doesn't
	 * appear in any of the queues.
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which would
	 * alter queues and touch the lock.
	 */
	if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
		/* Head insertion: empty queue, or REQUEST SENSE (see above). */
		LIST(cmd, hostdata->issue_queue);
		SET_NEXT(cmd, hostdata->issue_queue);
		hostdata->issue_queue = cmd;
	} else {
		/* Otherwise append at the tail. */
		for (tmp = (struct scsi_cmnd *)hostdata->issue_queue;
		     NEXT(tmp); tmp = NEXT(tmp))
			;
		LIST(cmd, tmp);
		SET_NEXT(tmp, cmd);
	}

	local_irq_restore(flags);

	QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
		  (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");

	/* If queue_command() is called from an interrupt (real one or bottom
	 * half), we let queue_main() do the job of taking care about main. If it
	 * is already running, this is a no-op, else main will be queued.
	 *
	 * If we're not in an interrupt, we can call NCR5380_main()
	 * unconditionally, because it cannot be already running.
	 */
	/* NOTE(review): '((flags >> 8) & 7) >= 6' looks like an m68k status-
	 * register interrupt-priority-level test ("caller had interrupts
	 * masked") -- confirm against the platform wrapper before changing. */
	if (in_interrupt() || ((flags >> 8) & 7) >= 6)
		queue_main();
	else
		NCR5380_main(NULL);
	return 0;
}

static DEF_SCSI_QCMD(NCR5380_queue_command)

/*
 * Function : NCR5380_main (void)
 *
 * Purpose : NCR5380_main is a coroutine that runs as long as more work can
 *	be done on the NCR5380 host adapters in a system. Both
 *	NCR5380_queue_command() and NCR5380_intr() will try to start it
 *	in case it is not running.
 *
 * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should
 *	reenable them. This prevents reentrancy and kernel stack overflow.
 */
static void NCR5380_main (struct work_struct *bl)
{
	struct scsi_cmnd *tmp, *prev;
	struct Scsi_Host *instance = first_instance;
	struct NCR5380_hostdata *hostdata = HOSTDATA(instance);
	int done;
	unsigned long flags;

	/*
	 * We run (with interrupts disabled) until we're sure that none of
	 * the host adapters have anything that can be done, at which point
	 * we set main_running to 0 and exit.
	 *
	 * Interrupts are enabled before doing various other internal
	 * instructions, after we've decided that we need to run through
	 * the loop again.
	 *
	 * this should prevent any race conditions.
	 *
	 * ++roman: Just disabling the NCR interrupt isn't sufficient here,
	 * because also a timer int can trigger an abort or reset, which can
	 * alter queues and touch the Falcon lock.
	 */

	/* Tell int handlers main() is now already executing. Note that no races
	   are possible here. If an int comes in before 'main_running' is set
	   here, and queues/executes main via the task queue, it doesn't do any
	   harm, just this instance of main won't find any work left to do. */
	if (main_running)
		return;
	main_running = 1;

	local_save_flags(flags);
	do {
		local_irq_disable(); /* Freeze request queues */
		done = 1;

		if (!hostdata->connected) {
			MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO );
			/*
			 * Search through the issue_queue for a command destined
			 * for a target that's not busy.
			 */
#if (NDEBUG & NDEBUG_LISTS)
			/* Debug: detect a self-referencing (looped) issue queue. */
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL;
			     tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp))
				;
			if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/
#endif
			for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
			     prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {

#if (NDEBUG & NDEBUG_LISTS)
				if (prev != tmp)
					printk("MAIN tmp=%p target=%d busy=%d lun=%d\n",
					       tmp, tmp->target, hostdata->busy[tmp->target],
					       tmp->lun);
#endif
				/* When we find one, remove it from the issue queue. */
				/* ++guenther: possible race with Falcon locking */
				if (
#ifdef SUPPORT_TAGS
				    !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
#else
				    !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))
#endif
				    ) {
					/* ++guenther: just to be sure, this must be atomic */
					local_irq_disable();
					if (prev) {
						REMOVE(prev, NEXT(prev), tmp, NEXT(tmp));
						SET_NEXT(prev, NEXT(tmp));
					} else {
						REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp));
						hostdata->issue_queue = NEXT(tmp);
					}
					SET_NEXT(tmp, NULL);

					/* reenable interrupts after finding one */
					local_irq_restore(flags);

					/*
					 * Attempt to establish an I_T_L nexus here.
					 * On success, instance->hostdata->connected is set.
					 * On failure, we must add the command back to the
					 * issue queue so we can keep trying.
					 */
					MAIN_PRINTK("scsi%d: main(): command for target %d "
						    "lun %d removed from issue_queue\n",
						    HOSTNO, tmp->target, tmp->lun);
					/*
					 * REQUEST SENSE commands are issued without tagged
					 * queueing, even on SCSI-II devices because the
					 * contingent allegiance condition exists for the
					 * entire unit.
					 */
					/* ++roman: ...and the standard also requires that
					 * REQUEST SENSE command are untagged.
					 */

#ifdef SUPPORT_TAGS
					cmd_get_tag( tmp, tmp->cmnd[0] != REQUEST_SENSE );
#endif
					if (!NCR5380_select(instance, tmp,
					    (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE :
					    TAG_NEXT)) {
						break;
					} else {
						/* Selection failed: put the command back on
						 * the head of the issue queue and retry. */
						local_irq_disable();
						LIST(tmp, hostdata->issue_queue);
						SET_NEXT(tmp, hostdata->issue_queue);
						hostdata->issue_queue = tmp;
#ifdef SUPPORT_TAGS
						cmd_free_tag( tmp );
#endif
						local_irq_restore(flags);
						MAIN_PRINTK("scsi%d: main(): select() failed, "
							    "returned to issue_queue\n", HOSTNO);
						if (hostdata->connected)
							break;
					}
				} /* if target/lun/target queue is not busy */
			} /* for issue_queue */
		} /* if (!hostdata->connected) */

		if (hostdata->connected
#ifdef REAL_DMA
		    && !hostdata->dma_len
#endif
		    ) {
			local_irq_restore(flags);
			MAIN_PRINTK("scsi%d: main: performing information transfer\n",
				    HOSTNO);
			NCR5380_information_transfer(instance);
			MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
			done = 0;
		}
	} while (!done);

	/* Better allow ints _after_ 'main_running' has been cleared, else an
	   interrupt could believe we'll pick up the work it left for us, but
	   we won't see it anymore here... */
	main_running = 0;
	local_irq_restore(flags);
}


#ifdef REAL_DMA
/*
 * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
 *
 * Purpose : Called by interrupt handler when DMA finishes or a phase
 *	mismatch occurs (which would finish the DMA transfer).
 *
 * Inputs : instance - this instance of the NCR5380.
 *
 */

static void NCR5380_dma_complete( struct Scsi_Host *instance )
{
	SETUP_HOSTDATA(instance);
	int transfered;
	unsigned char **data;
	volatile int *count;

	if (!hostdata->connected) {
		printk(KERN_WARNING "scsi%d: received end of DMA interrupt with "
		       "no connected cmd\n", HOSTNO);
		return;
	}

	DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
		   HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
		   NCR5380_read(STATUS_REG));

	/* Overrun in the (sun3) UDC DMA counter is unrecoverable here. */
	if((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
		printk("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", HOSTNO);
		printk("please e-mail sammy@sammy.net with a description of how this\n");
		printk("error was produced.\n");
		BUG();
	}

	/* make sure we're not stuck in a data phase */
	if((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
	   (BASR_PHASE_MATCH | BASR_ACK)) {
		printk("scsi%d: BASR %02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG));
		printk("scsi%d: bus stuck in data phase -- probably a single byte "
		       "overrun!\n", HOSTNO);
		printk("not prepared for this error!\n");
		printk("please e-mail sammy@sammy.net with a description of how this\n");
		printk("error was produced.\n");
		BUG();
	}

	/* Clear pending interrupt state and take the chip out of DMA mode. */
	(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);

	/* Advance the connected command's data pointer/residual by the
	 * number of bytes actually transferred. */
	transfered = hostdata->dma_len - NCR5380_dma_residual(instance);
	hostdata->dma_len = 0;

	data = (unsigned char **) &(hostdata->connected->SCp.ptr);
	count = &(hostdata->connected->SCp.this_residual);

	*data += transfered;
	*count -= transfered;
}
#endif /* REAL_DMA */


/*
 * Function : void NCR5380_intr (int irq)
 *
 * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
 *	from the disconnected queue, and restarting NCR5380_main()
 *	as required.
 *
 * Inputs : int irq, irq that caused this interrupt.
 *
 */

static irqreturn_t NCR5380_intr (int irq, void *dev_id)
{
	struct Scsi_Host *instance = first_instance;
	int done = 1, handled = 0;
	unsigned char basr;

	INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);

	/* Look for pending interrupts */
	basr = NCR5380_read(BUS_AND_STATUS_REG);
	INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
	/* dispatch to appropriate routine if found and done=0 */
	if (basr & BASR_IRQ) {
		NCR_PRINT(NDEBUG_INTR);
		if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
			/* SEL + I/O asserted: a target is reselecting us. */
			done = 0;
//			ENABLE_IRQ();
			INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
			NCR5380_reselect(instance);
			(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		}
		else if (basr & BASR_PARITY_ERROR) {
			INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
			(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		}
		else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
			INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
		}
		else {
			/*
			 * The rest of the interrupt conditions can occur only during a
			 * DMA transfer
			 */

#if defined(REAL_DMA)
			/*
			 * We should only get PHASE MISMATCH and EOP interrupts if we have
			 * DMA enabled, so do a sanity check based on the current setting
			 * of the MODE register.
			 */

			if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) &&
			    ((basr & BASR_END_DMA_TRANSFER) ||
			     !(basr & BASR_PHASE_MATCH))) {

				INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
				NCR5380_dma_complete( instance );
				done = 0;
//				ENABLE_IRQ();
			} else
#endif /* REAL_DMA */
			{
/* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
				if (basr & BASR_PHASE_MATCH)
					INT_PRINTK("scsi%d: unknown interrupt, "
						   "BASR 0x%x, MR 0x%x, SR 0x%x\n",
						   HOSTNO, basr, NCR5380_read(MODE_REG),
						   NCR5380_read(STATUS_REG));
				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
				dregs->csr |= CSR_DMA_ENABLE;
#endif
			}
		} /* if !(SELECTION || PARITY) */
		handled = 1;
	} /* BASR & IRQ */
	else {
		/* Spurious: our IRQ line fired but the chip reports nothing. */
		printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, "
		       "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr,
		       NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
		(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#ifdef SUN3_SCSI_VME
		dregs->csr |= CSR_DMA_ENABLE;
#endif
	}

	if (!done) {
		INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
		/* Put a call to NCR5380_main() on the queue... */
		queue_main();
	}
	return IRQ_RETVAL(handled);
}

#ifdef NCR5380_STATS
/* Completion-side accounting: add the elapsed time and drop the pending
 * counter that NCR5380_queue_command_lck() incremented for read/write
 * commands. */
static void collect_stats(struct NCR5380_hostdata *hostdata,
			  struct scsi_cmnd *cmd)
{
# ifdef NCR5380_STAT_LIMIT
	if (scsi_bufflen(cmd) > NCR5380_STAT_LIMIT)
# endif
		switch (cmd->cmnd[0]) {
		case WRITE:
		case WRITE_6:
		case WRITE_10:
			hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase);
			/*hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);*/
			hostdata->pendingw--;
			break;
		case READ:
		case READ_6:
		case READ_10:
			hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase);
			/*hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);*/
			hostdata->pendingr--;
			break;
		}
}
#endif

/*
 * Function : int NCR5380_select(struct Scsi_Host *instance,
 *	struct scsi_cmnd *cmd, int tag);
 *
 * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
 *	including ARBITRATION, SELECTION, and initial message out for
 *	IDENTIFY and queue messages.
 *
 * Inputs : instance - instantiation of the 5380 driver on which this
 *	target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
 *	new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
 *	the command that is presently connected.
 *
 * Returns : -1 if selection could not execute for some reason,
 *	0 if selection succeeded or failed because the target
 *	did not respond.
 *
 * Side effects :
 *	If bus busy, arbitration failed, etc, NCR5380_select() will exit
 *	with registers as they should have been on entry - ie
 *	SELECT_ENABLE will be set appropriately, the NCR5380
 *	will cease to drive any SCSI bus signals.
 *
 *	If successful : I_T_L or I_T_L_Q nexus will be established,
 *	instance->connected will be set to cmd.
 *	SELECT interrupt will be disabled.
 *
 *	If failed (no target) : cmd->scsi_done() will be called, and the
 *	cmd->result host byte set to DID_BAD_TARGET.
*/ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, int tag) { SETUP_HOSTDATA(instance); unsigned char tmp[3], phase; unsigned char *data; int len; unsigned long timeout; unsigned long flags; hostdata->restart_select = 0; NCR_PRINT(NDEBUG_ARBITRATION); ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, instance->this_id); /* * Set the phase bits to 0, otherwise the NCR5380 won't drive the * data bus during SELECTION. */ local_irq_save(flags); if (hostdata->connected) { local_irq_restore(flags); return -1; } NCR5380_write(TARGET_COMMAND_REG, 0); /* * Start arbitration. */ NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); NCR5380_write(MODE_REG, MR_ARBITRATE); local_irq_restore(flags); /* Wait for arbitration logic to complete */ #ifdef NCR_TIMEOUT { unsigned long timeout = jiffies + 2*NCR_TIMEOUT; while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) && time_before(jiffies, timeout) && !hostdata->connected) ; if (time_after_eq(jiffies, timeout)) { printk("scsi : arbitration timeout at %d\n", __LINE__); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } } #else /* NCR_TIMEOUT */ while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) && !hostdata->connected); #endif ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); if (hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); return -1; } /* * The arbitration delay is 2.2us, but this is a minimum and there is * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate * the integral nature of udelay(). 
* */ udelay(3); /* Check for lost arbitration */ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", HOSTNO); return -1; } /* after/during arbitration, BSY should be asserted. IBM DPES-31080 Version S31Q works now */ /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY ) ; if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", HOSTNO); return -1; } /* * Again, bus clear + bus settle time is 1.2us, however, this is * a minimum so we'll udelay ceil(1.2) */ #ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY /* ++roman: But some targets (see above :-) seem to need a bit more... */ udelay(15); #else udelay(2); #endif if (hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return -1; } ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); /* * Now that we have won arbitration, start Selection process, asserting * the host and target ID's on the SCSI bus. */ NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id))); /* * Raise ATN while SEL is true before BSY goes false from arbitration, * since this is the only way to guarantee that we'll get a MESSAGE OUT * phase immediately after selection. */ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL )); NCR5380_write(MODE_REG, MR_BASE); /* * Reselect interrupts must be turned off prior to the dropping of BSY, * otherwise we will trigger an interrupt. 
*/ if (hostdata->connected) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); return -1; } NCR5380_write(SELECT_ENABLE_REG, 0); /* * The initiator shall then wait at least two deskew delays and release * the BSY signal. */ udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ /* Reset BSY */ NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL)); /* * Something weird happens when we cease to drive BSY - looks * like the board/chip is letting us do another read before the * appropriate propagation delay has expired, and we're confusing * a BSY signal from ourselves as the target's response to SELECTION. * * A small delay (the 'C++' frontend breaks the pipeline with an * unnecessary jump, making it work on my 386-33/Trantor T128, the * tighter 'C' code breaks and requires this) solves the problem - * the 1 us delay is arbitrary, and only used because this delay will * be the same on other platforms and since it works here, it should * work there. * * wingel suggests that this could be due to failing to wait * one deskew delay. */ udelay(1); SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); /* * The SCSI specification calls for a 250 ms timeout for the actual * selection. */ timeout = jiffies + 25; /* * XXX very interesting - we're seeing a bounce where the BSY we * asserted is being reflected / still asserted (propagation delay?) * and it's detecting as true. Sigh. */ #if 0 /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert * IO while SEL is true. But again, there are some disks out the in the * world that do that nevertheless. (Somebody claimed that this announces * reselection capability of the target.) So we better skip that test and * only wait for BSY... 
(Famous german words: Der Klügere gibt nach :-) */ while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO))); if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_reselect(instance); printk (KERN_ERR "scsi%d: reselection after won arbitration?\n", HOSTNO); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } #else while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY)); #endif /* * No less than two deskew delays after the initiator detects the * BSY signal is true, it shall release the SEL signal and may * change the DATA BUS. -wingel */ udelay(1); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); if (!(NCR5380_read(STATUS_REG) & SR_BSY)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); if (hostdata->targets_present & (1 << cmd->device->id)) { printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); if (hostdata->restart_select) printk(KERN_NOTICE "\trestart select\n"); NCR_PRINT(NDEBUG_ANY); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } cmd->result = DID_BAD_TARGET << 16; #ifdef NCR5380_STATS collect_stats(hostdata, cmd); #endif #ifdef SUPPORT_TAGS cmd_free_tag( cmd ); #endif cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return 0; } hostdata->targets_present |= (1 << cmd->device->id); /* * Since we followed the SCSI spec, and raised ATN while SEL * was true but before BSY was false during selection, the information * transfer phase should be a MESSAGE OUT phase so that we can send the * IDENTIFY message. * * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG * message (2 bytes) with a tag ID that we increment with every command * until it wraps back to 0. 
* * XXX - it turns out that there are some broken SCSI-II devices, * which claim to support tagged queuing but fail when more than * some number of commands are issued at once. */ /* Wait for start of REQ/ACK handshake */ while (!(NCR5380_read(STATUS_REG) & SR_REQ)); SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", HOSTNO, cmd->device->id); tmp[0] = IDENTIFY(1, cmd->device->lun); #ifdef SUPPORT_TAGS if (cmd->tag != TAG_NONE) { tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; tmp[2] = cmd->tag; len = 3; } else len = 1; #else len = 1; cmd->tag=0; #endif /* SUPPORT_TAGS */ /* Send message(s) */ data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); /* XXX need to handle errors here */ hostdata->connected = cmd; #ifndef SUPPORT_TAGS hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); #endif #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; #endif initialize_SCp(cmd); return 0; } /* * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, * unsigned char *phase, int *count, unsigned char **data) * * Purpose : transfers data in given phase using polled I/O * * Inputs : instance - instance of driver, *phase - pointer to * what phase is expected, *count - pointer to number of * bytes to transfer, **data - pointer to data pointer. * * Returns : -1 when different phase is entered without transferring * maximum number of bytes, 0 if all bytes are transferred or exit * is in same phase. * * Also, *phase, *count, *data are modified in place. * * XXX Note : handling for bus free may be useful. */ /* * Note : this code is not as quick as it could be, however it * IS 100% reliable, and for the actual data transfer where speed * counts, we will always do a pseudo DMA or DMA transfer. 
*/ static int NCR5380_transfer_pio( struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) { register unsigned char p = *phase, tmp; register int c = *count; register unsigned char *d = *data; /* * The NCR5380 chip will only drive the SCSI bus when the * phase specified in the appropriate bits of the TARGET COMMAND * REGISTER match the STATUS REGISTER */ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); do { /* * Wait for assertion of REQ, after which the phase bits will be * valid */ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); /* Check for phase mismatch */ if ((tmp & PHASE_MASK) != p) { PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); NCR_PRINT_PHASE(NDEBUG_PIO); break; } /* Do actual transfer from SCSI bus to / from memory */ if (!(p & SR_IO)) NCR5380_write(OUTPUT_DATA_REG, *d); else *d = NCR5380_read(CURRENT_SCSI_DATA_REG); ++d; /* * The SCSI standard suggests that in MSGOUT phase, the initiator * should drop ATN on the last byte of the message phase * after REQ has been asserted for the handshake but before * the initiator raises ACK. */ if (!(p & SR_IO)) { if (!((p & SR_MSG) && c > 1)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); NCR_PRINT(NDEBUG_PIO); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK); } else { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN); NCR_PRINT(NDEBUG_PIO); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); } } else { NCR_PRINT(NDEBUG_PIO); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); } while (NCR5380_read(STATUS_REG) & SR_REQ); HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); /* * We have several special cases to consider during REQ/ACK handshaking : * 1. We were in MSGOUT phase, and we are on the last byte of the * message. ATN must be dropped as ACK is dropped. * * 2. 
We are in a MSGIN phase, and we are on the last byte of the * message. We must exit with ACK asserted, so that the calling * code may raise ATN before dropping ACK to reject the message. * * 3. ACK and ATN are clear and the target may proceed as normal. */ if (!(p == PHASE_MSGIN && c == 1)) { if (p == PHASE_MSGOUT && c > 1) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); else NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); } } while (--c); PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); *count = c; *data = d; tmp = NCR5380_read(STATUS_REG); /* The phase read from the bus is valid if either REQ is (already) * asserted or if ACK hasn't been released yet. The latter is the case if * we're in MSGIN and all wanted bytes have been received. */ if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0)) *phase = tmp & PHASE_MASK; else *phase = PHASE_UNKNOWN; if (!c || (*phase == p)) return 0; else return -1; } /* * Function : do_abort (Scsi_Host *host) * * Purpose : abort the currently established nexus. Should only be * called from a routine which can drop into a * * Returns : 0 on success, -1 on failure. */ static int do_abort (struct Scsi_Host *host) { unsigned char tmp, *msgptr, phase; int len; /* Request message out phase */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); /* * Wait for the target to indicate a valid phase by asserting * REQ. Once this happens, we'll have either a MSGOUT phase * and can immediately send the ABORT message, or we'll have some * other phase and will have to source/sink data. * * We really don't care what value was on the bus or what value * the target sees, so we just handshake. 
*/ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); if ((tmp & PHASE_MASK) != PHASE_MSGOUT) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); while (NCR5380_read(STATUS_REG) & SR_REQ); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); } tmp = ABORT; msgptr = &tmp; len = 1; phase = PHASE_MSGOUT; NCR5380_transfer_pio (host, &phase, &len, &msgptr); /* * If we got here, and the command completed successfully, * we're about to go into bus free state. */ return len ? -1 : 0; } #if defined(REAL_DMA) /* * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, * unsigned char *phase, int *count, unsigned char **data) * * Purpose : transfers data in given phase using either real * or pseudo DMA. * * Inputs : instance - instance of driver, *phase - pointer to * what phase is expected, *count - pointer to number of * bytes to transfer, **data - pointer to data pointer. * * Returns : -1 when different phase is entered without transferring * maximum number of bytes, 0 if all bytes or transferred or exit * is in same phase. * * Also, *phase, *count, *data are modified in place. * */ static int NCR5380_transfer_dma( struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) { SETUP_HOSTDATA(instance); register int c = *count; register unsigned char p = *phase; unsigned long flags; /* sanity check */ if(!sun3_dma_setup_done) { printk("scsi%d: transfer_dma without setup!\n", HOSTNO); BUG(); } hostdata->dma_len = c; DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", HOSTNO, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? 
"to" : "from", *data); /* netbsd turns off ints here, why not be safe and do it too */ local_irq_save(flags); /* send start chain */ sun3scsi_dma_start(c, *data); if (p & SR_IO) { NCR5380_write(TARGET_COMMAND_REG, 1); NCR5380_read(RESET_PARITY_INTERRUPT_REG); NCR5380_write(INITIATOR_COMMAND_REG, 0); NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR)); NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); } else { NCR5380_write(TARGET_COMMAND_REG, 0); NCR5380_read(RESET_PARITY_INTERRUPT_REG); NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA); NCR5380_write(MODE_REG, (NCR5380_read(MODE_REG) | MR_DMA_MODE | MR_ENABLE_EOP_INTR)); NCR5380_write(START_DMA_SEND_REG, 0); } #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif local_irq_restore(flags); sun3_dma_active = 1; return 0; } #endif /* defined(REAL_DMA) */ /* * Function : NCR5380_information_transfer (struct Scsi_Host *instance) * * Purpose : run through the various SCSI phases and do as the target * directs us to. Operates on the currently connected command, * instance->connected. * * Inputs : instance, instance for which we are doing commands * * Side effects : SCSI things happen, the disconnected queue will be * modified if a command disconnects, *instance->connected will * change. * * XXX Note : we need to watch for bus free or a reset condition here * to recover from an unexpected bus free condition. 
*/ static void NCR5380_information_transfer (struct Scsi_Host *instance) { SETUP_HOSTDATA(instance); unsigned long flags; unsigned char msgout = NOP; int sink = 0; int len; #if defined(REAL_DMA) int transfersize; #endif unsigned char *data; unsigned char phase, tmp, extended_msg[10], old_phase=0xff; struct scsi_cmnd *cmd = (struct scsi_cmnd *) hostdata->connected; #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; #endif while (1) { tmp = NCR5380_read(STATUS_REG); /* We only have a valid SCSI phase when REQ is asserted */ if (tmp & SR_REQ) { phase = (tmp & PHASE_MASK); if (phase != old_phase) { old_phase = phase; NCR_PRINT_PHASE(NDEBUG_INFORMATION); } if(phase == PHASE_CMDOUT) { void *d; unsigned long count; if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { count = cmd->SCp.buffer->length; d = SGADDR(cmd->SCp.buffer); } else { count = cmd->SCp.this_residual; d = cmd->SCp.ptr; } #ifdef REAL_DMA /* this command setup for dma yet? */ if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != cmd)) { if (cmd->request->cmd_type == REQ_TYPE_FS) { sun3scsi_dma_setup(d, count, rq_data_dir(cmd->request)); sun3_dma_setup_done = cmd; } } #endif #ifdef SUN3_SCSI_VME dregs->csr |= CSR_INTR; #endif } if (sink && (phase != PHASE_MSGOUT)) { NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK); while (NCR5380_read(STATUS_REG) & SR_REQ); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); sink = 0; continue; } switch (phase) { case PHASE_DATAOUT: #if (NDEBUG & NDEBUG_NO_DATAOUT) printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT " "aborted\n", HOSTNO); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; cmd->scsi_done(cmd); return; #endif case PHASE_DATAIN: /* * If there is no room left in the current buffer in the * scatter-gather list, move onto the next one. 
*/ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { ++cmd->SCp.buffer; --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", HOSTNO, cmd->SCp.this_residual, cmd->SCp.buffers_residual); } /* * The preferred transfer method is going to be * PSEUDO-DMA for systems that are strictly PIO, * since we can let the hardware do the handshaking. * * For this to work, we need to know the transfersize * ahead of time, since the pseudo-DMA code will sit * in an unconditional loop. */ /* ++roman: I suggest, this should be * #if def(REAL_DMA) * instead of leaving REAL_DMA out. */ #if defined(REAL_DMA) // if (!cmd->device->borken && if((transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > SUN3_DMA_MINSIZE) { len = transfersize; cmd->SCp.phase = phase; if (NCR5380_transfer_dma(instance, &phase, &len, (unsigned char **) &cmd->SCp.ptr)) { /* * If the watchdog timer fires, all future * accesses to this device will use the * polled-IO. */ printk(KERN_NOTICE "scsi%d: switching target %d " "lun %d to slow handshake\n", HOSTNO, cmd->device->id, cmd->device->lun); cmd->device->borken = 1; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); sink = 1; do_abort(instance); cmd->result = DID_ERROR << 16; cmd->scsi_done(cmd); /* XXX - need to source or sink data here, as appropriate */ } else { #ifdef REAL_DMA /* ++roman: When using real DMA, * information_transfer() should return after * starting DMA since it has nothing more to * do. 
*/ return; #else cmd->SCp.this_residual -= transfersize - len; #endif } } else #endif /* defined(REAL_DMA) */ NCR5380_transfer_pio(instance, &phase, (int *) &cmd->SCp.this_residual, (unsigned char **) &cmd->SCp.ptr); #ifdef REAL_DMA /* if we had intended to dma that command clear it */ if(sun3_dma_setup_done == cmd) sun3_dma_setup_done = NULL; #endif break; case PHASE_MSGIN: len = 1; data = &tmp; NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */ NCR5380_transfer_pio(instance, &phase, &len, &data); cmd->SCp.Message = tmp; switch (tmp) { /* * Linking lets us reduce the time required to get the * next command out to the device, hopefully this will * mean we don't waste another revolution due to the delays * required by ARBITRATION and another SELECTION. * * In the current implementation proposal, low level drivers * merely have to start the next command, pointed to by * next_link, done() is called as with unlinked commands. */ #ifdef LINKED case LINKED_CMD_COMPLETE: case LINKED_FLG_CMD_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); LNK_PRINTK("scsi%d: target %d lun %d linked command " "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * Sanity check : A linked command should only terminate * with one of these messages if there are more linked * commands available. */ if (!cmd->next_link) { printk(KERN_NOTICE "scsi%d: target %d lun %d " "linked command complete, no next_link\n", HOSTNO, cmd->device->id, cmd->device->lun); sink = 1; do_abort (instance); return; } initialize_SCp(cmd->next_link); /* The next command is still part of this process; copy it * and don't free it! 
*/ cmd->next_link->tag = cmd->tag; cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); LNK_PRINTK("scsi%d: target %d lun %d linked request " "done, calling scsi_done().\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef NCR5380_STATS collect_stats(hostdata, cmd); #endif cmd->scsi_done(cmd); cmd = hostdata->connected; break; #endif /* def LINKED */ case ABORT: case COMMAND_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); hostdata->connected = NULL; QU_PRINTK("scsi%d: command for target %d, lun %d " "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef SUPPORT_TAGS cmd_free_tag( cmd ); if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { /* Turn a QUEUE FULL status into BUSY, I think the * mid level cannot handle QUEUE FULL :-( (The * command is retried after BUSY). Also update our * queue size to the number of currently issued * commands now. */ /* ++Andreas: the mid level code knows about QUEUE_FULL now. */ TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; TAG_PRINTK("scsi%d: target %d lun %d returned " "QUEUE_FULL after %d commands\n", HOSTNO, cmd->device->id, cmd->device->lun, ta->nr_allocated); if (ta->queue_size > ta->nr_allocated) ta->nr_allocated = ta->queue_size; } #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * I'm not sure what the correct thing to do here is : * * If the command that just executed is NOT a request * sense, the obvious thing to do is to set the result * code to the values of the stored parameters. * * If it was a REQUEST SENSE command, we need some way to * differentiate between the failure code of the original * and the failure code of the REQUEST sense - the obvious * case is success, where we fall through and leave the * result code unchanged. 
* * The non-obvious place is where the REQUEST SENSE failed */ if (cmd->cmnd[0] != REQUEST_SENSE) cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); else if (status_byte(cmd->SCp.Status) != GOOD) cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); #ifdef AUTOSENSE if ((cmd->cmnd[0] == REQUEST_SENSE) && hostdata->ses.cmd_len) { scsi_eh_restore_cmnd(cmd, &hostdata->ses); hostdata->ses.cmd_len = 0 ; } if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO); /* this is initialized from initialize_SCp cmd->SCp.buffer = NULL; cmd->SCp.buffers_residual = 0; */ local_irq_save(flags); LIST(cmd,hostdata->issue_queue); SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = (struct scsi_cmnd *) cmd; local_irq_restore(flags); QU_PRINTK("scsi%d: REQUEST SENSE added to head of " "issue queue\n", H_NO(cmd)); } else #endif /* def AUTOSENSE */ { #ifdef NCR5380_STATS collect_stats(hostdata, cmd); #endif cmd->scsi_done(cmd); } NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. */ NCR5380_write(TARGET_COMMAND_REG, 0); while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) barrier(); return; case MESSAGE_REJECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); switch (hostdata->last_message) { case HEAD_OF_QUEUE_TAG: case ORDERED_QUEUE_TAG: case SIMPLE_QUEUE_TAG: /* The target obviously doesn't support tagged * queuing, even though it announced this ability in * its INQUIRY data ?!? (maybe only this LUN?) Ok, * clear 'tagged_supported' and lock the LUN, since * the command is treated as untagged further on. 
*/ cmd->device->tagged_supported = 0; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); cmd->tag = TAG_NONE; TAG_PRINTK("scsi%d: target %d lun %d rejected " "QUEUE_TAG message; tagged queuing " "disabled\n", HOSTNO, cmd->device->id, cmd->device->lun); break; } break; case DISCONNECT: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); local_irq_save(flags); cmd->device->disconnect = 1; LIST(cmd,hostdata->disconnected_queue); SET_NEXT(cmd, hostdata->disconnected_queue); hostdata->connected = NULL; hostdata->disconnected_queue = cmd; local_irq_restore(flags); QU_PRINTK("scsi%d: command for target %d lun %d was " "moved from connected to the " "disconnected_queue\n", HOSTNO, cmd->device->id, cmd->device->lun); /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. */ NCR5380_write(TARGET_COMMAND_REG, 0); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); /* Wait for bus free to avoid nasty timeouts */ while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) barrier(); #ifdef SUN3_SCSI_VME dregs->csr |= CSR_DMA_ENABLE; #endif return; /* * The SCSI data pointer is *IMPLICITLY* saved on a disconnect * operation, in violation of the SCSI spec so we can safely * ignore SAVE/RESTORE pointers calls. * * Unfortunately, some disks violate the SCSI spec and * don't issue the required SAVE_POINTERS message before * disconnecting, and we have to break spec to remain * compatible. 
*/ case SAVE_POINTERS: case RESTORE_POINTERS: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* Enable reselect interrupts */ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); break; case EXTENDED_MESSAGE: /* * Extended messages are sent in the following format : * Byte * 0 EXTENDED_MESSAGE == 1 * 1 length (includes one byte for code, doesn't * include first two bytes) * 2 code * 3..length+1 arguments * * Start the extended message buffer with the EXTENDED_MESSAGE * byte, since spi_print_msg() wants the whole thing. */ extended_msg[0] = EXTENDED_MESSAGE; /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, (int)extended_msg[1], (int)extended_msg[2]); if (!len && extended_msg[1] <= (sizeof (extended_msg) - 1)) { /* Accept third byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); len = extended_msg[1] - 1; data = extended_msg + 3; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); EXT_PRINTK("scsi%d: message received, residual %d\n", HOSTNO, len); switch (extended_msg[2]) { case EXTENDED_SDTR: case EXTENDED_WDTR: case EXTENDED_MODIFY_DATA_POINTER: case EXTENDED_EXTENDED_IDENTIFY: tmp = 0; } } else if (len) { printk(KERN_NOTICE "scsi%d: error receiving " "extended message\n", HOSTNO); tmp = 0; } else { printk(KERN_NOTICE "scsi%d: extended message " "code %02x length %d is too long\n", HOSTNO, extended_msg[2], extended_msg[1]); tmp = 0; } /* Fall through to reject message */ /* * If we get something weird that we aren't expecting, * reject it. 
*/ default: if (!tmp) { printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); spi_print_msg(extended_msg); printk("\n"); } else if (tmp != EXTENDED_MESSAGE) printk(KERN_DEBUG "scsi%d: rejecting unknown " "message %02x from target %d, lun %d\n", HOSTNO, tmp, cmd->device->id, cmd->device->lun); else printk(KERN_DEBUG "scsi%d: rejecting unknown " "extended message " "code %02x, length %d from target %d, lun %d\n", HOSTNO, extended_msg[1], extended_msg[0], cmd->device->id, cmd->device->lun); msgout = MESSAGE_REJECT; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); break; } /* switch (tmp) */ break; case PHASE_MSGOUT: len = 1; data = &msgout; hostdata->last_message = msgout; NCR5380_transfer_pio(instance, &phase, &len, &data); if (msgout == ABORT) { #ifdef SUPPORT_TAGS cmd_free_tag( cmd ); #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif hostdata->connected = NULL; cmd->result = DID_ERROR << 16; #ifdef NCR5380_STATS collect_stats(hostdata, cmd); #endif cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return; } msgout = NOP; break; case PHASE_CMDOUT: len = cmd->cmd_len; data = cmd->cmnd; /* * XXX for performance reasons, on machines with a * PSEUDO-DMA architecture we should probably * use the dma transfer function. */ NCR5380_transfer_pio(instance, &phase, &len, &data); break; case PHASE_STATIN: len = 1; data = &tmp; NCR5380_transfer_pio(instance, &phase, &len, &data); cmd->SCp.Status = tmp; break; default: printk("scsi%d: unknown phase\n", HOSTNO); NCR_PRINT(NDEBUG_ANY); } /* switch(phase) */ } /* if (tmp * SR_REQ) */ } /* while (1) */ } /* * Function : void NCR5380_reselect (struct Scsi_Host *instance) * * Purpose : does reselection, initializing the instance->connected * field to point to the struct scsi_cmnd for which the I_T_L or I_T_L_Q * nexus has been reestablished, * * Inputs : instance - this instance of the NCR5380. 
* */ /* it might eventually prove necessary to do a dma setup on reselection, but it doesn't seem to be needed now -- sam */ static void NCR5380_reselect (struct Scsi_Host *instance) { SETUP_HOSTDATA(instance); unsigned char target_mask; unsigned char lun; #ifdef SUPPORT_TAGS unsigned char tag; #endif unsigned char msg[3]; struct scsi_cmnd *tmp = NULL, *prev; /* unsigned long flags; */ /* * Disable arbitration, etc. since the host adapter obviously * lost, and tell an interrupted NCR5380_select() to restart. */ NCR5380_write(MODE_REG, MR_BASE); hostdata->restart_select = 1; target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); RSL_PRINTK("scsi%d: reselect\n", HOSTNO); /* * At this point, we have detected that our SCSI ID is on the bus, * SEL is true and BSY was false for at least one bus settle delay * (400 ns). * * We must assert BSY ourselves, until the target drops the SEL * signal. */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); while (NCR5380_read(STATUS_REG) & SR_SEL); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); /* * Wait for target to go into MSGIN. */ while (!(NCR5380_read(STATUS_REG) & SR_REQ)); #if 1 // acknowledge toggle to MSGIN NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN)); // peek at the byte without really hitting the bus msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG); #endif if (!(msg[0] & 0x80)) { printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); spi_print_msg(msg); do_abort(instance); return; } lun = (msg[0] & 0x07); /* * Find the command corresponding to the I_T_L or I_T_L_Q nexus we * just reestablished, and remove it from the disconnected queue. 
*/ for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) #ifdef SUPPORT_TAGS && (tag == tmp->tag) #endif ) { if (prev) { REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); SET_NEXT(prev, NEXT(tmp)); } else { REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); hostdata->disconnected_queue = NEXT(tmp); } SET_NEXT(tmp, NULL); break; } } if (!tmp) { printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d " #ifdef SUPPORT_TAGS "tag %d " #endif "not in disconnected_queue.\n", HOSTNO, target_mask, lun #ifdef SUPPORT_TAGS , tag #endif ); /* * Since we have an established nexus that we can't do anything * with, we must abort it. */ do_abort(instance); return; } #if 1 /* engage dma setup for the command we just saw */ { void *d; unsigned long count; if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) { count = tmp->SCp.buffer->length; d = SGADDR(tmp->SCp.buffer); } else { count = tmp->SCp.this_residual; d = tmp->SCp.ptr; } #ifdef REAL_DMA /* setup this command for dma if not already */ if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != tmp)) { sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request)); sun3_dma_setup_done = tmp; } #endif } #endif NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); #ifdef SUPPORT_TAGS /* If the phase is still MSGIN, the target wants to send some more * messages. In case it supports tagged queuing, this is probably a * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. 
*/ tag = TAG_NONE; if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { /* Accept previous IDENTIFY message by clearing ACK */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); len = 2; data = msg+1; if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " "reselection\n", HOSTNO, target_mask, lun, tag); } #endif hostdata->connected = tmp; RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", HOSTNO, tmp->target, tmp->lun, tmp->tag); } /* * Function : int NCR5380_abort(struct scsi_cmnd *cmd) * * Purpose : abort a command * * Inputs : cmd - the struct scsi_cmnd to abort, code - code to set the * host byte of the result field to, if zero DID_ABORTED is * used. * * Returns : 0 - success, -1 on failure. * * XXX - there is no way to abort the command that is currently * connected, you have to wait for it to complete. If this is * a problem, we could implement longjmp() / setjmp(), setjmp() * called where the loop started in NCR5380_main(). */ static int NCR5380_abort(struct scsi_cmnd *cmd) { struct Scsi_Host *instance = cmd->device->host; SETUP_HOSTDATA(instance); struct scsi_cmnd *tmp, **prev; unsigned long flags; printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO); scsi_print_command(cmd); NCR5380_print_status (instance); local_irq_save(flags); ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); #if 1 /* * Case 1 : If the command is the currently executing command, * we'll set the aborted flag and return control so that * information transfer routine can exit cleanly. */ if (hostdata->connected == cmd) { ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); /* * We should perform BSY checking, and make sure we haven't slipped * into BUS FREE. 
*/ /* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */ /* * Since we can't change phases until we've completed the current * handshake, we have to source or sink a byte of data if the current * phase is not MSGOUT. */ /* * Return control to the executing NCR drive so we can clear the * aborted flag and get back into our main loop. */ if (do_abort(instance) == 0) { hostdata->aborted = 1; hostdata->connected = NULL; cmd->result = DID_ABORT << 16; #ifdef SUPPORT_TAGS cmd_free_tag( cmd ); #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif local_irq_restore(flags); cmd->scsi_done(cmd); return SCSI_ABORT_SUCCESS; } else { /* local_irq_restore(flags); */ printk("scsi%d: abort of connected command failed!\n", HOSTNO); return SCSI_ABORT_ERROR; } } #endif /* * Case 2 : If the command hasn't been issued yet, we simply remove it * from the issue queue. */ for (prev = (struct scsi_cmnd **) &(hostdata->issue_queue), tmp = (struct scsi_cmnd *) hostdata->issue_queue; tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); (*prev) = NEXT(tmp); SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; local_irq_restore(flags); ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", HOSTNO); /* Tagged queuing note: no tag to free here, hasn't been assigned * yet... */ tmp->scsi_done(tmp); return SCSI_ABORT_SUCCESS; } /* * Case 3 : If any commands are connected, we're going to fail the abort * and let the high level SCSI driver retry at a later time or * issue a reset. * * Timeouts, and therefore aborted commands, will be highly unlikely * and handling them cleanly in this situation would make the common * case of noresets less efficient, and would pollute our code. So, * we fail. 
*/ if (hostdata->connected) { local_irq_restore(flags); ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); return SCSI_ABORT_SNOOZE; } /* * Case 4: If the command is currently disconnected from the bus, and * there are no connected commands, we reconnect the I_T_L or * I_T_L_Q nexus associated with it, go into message out, and send * an abort message. * * This case is especially ugly. In order to reestablish the nexus, we * need to call NCR5380_select(). The easiest way to implement this * function was to abort if the bus was busy, and let the interrupt * handler triggered on the SEL for reselect take care of lost arbitrations * where necessary, meaning interrupts need to be enabled. * * When interrupts are enabled, the queues may change - so we * can't remove it from the disconnected queue before selecting it * because that could cause a failure in hashing the nexus if that * device reselected. * * Since the queues may change, we can't use the pointers from when we * first locate it. * * So, we must first locate the command, and if NCR5380_select() * succeeds, then issue the abort, relocate the command and remove * it from the disconnected queue. 
*/ for (tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; tmp = NEXT(tmp)) if (cmd == tmp) { local_irq_restore(flags); ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); if (NCR5380_select (instance, cmd, (int) cmd->tag)) return SCSI_ABORT_BUSY; ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); do_abort (instance); local_irq_save(flags); for (prev = (struct scsi_cmnd **) &(hostdata->disconnected_queue), tmp = (struct scsi_cmnd *) hostdata->disconnected_queue; tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) ) if (cmd == tmp) { REMOVE(5, *prev, tmp, NEXT(tmp)); *prev = NEXT(tmp); SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; /* We must unlock the tag/LUN immediately here, since the * target goes to BUS FREE and doesn't send us another * message (COMMAND_COMPLETE or the like) */ #ifdef SUPPORT_TAGS cmd_free_tag( tmp ); #else hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); #endif local_irq_restore(flags); tmp->scsi_done(tmp); return SCSI_ABORT_SUCCESS; } } /* * Case 5 : If we reached this point, the command was not found in any of * the queues. * * We probably reached this point because of an unlikely race condition * between the command completing successfully and the abortion code, * so we won't panic, but we will notify the user in case something really * broke. */ local_irq_restore(flags); printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); return SCSI_ABORT_NOT_RUNNING; } /* * Function : int NCR5380_bus_reset(struct scsi_cmnd *cmd) * * Purpose : reset the SCSI bus. 
* * Returns : SCSI_RESET_WAKEUP * */ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) { SETUP_HOSTDATA(cmd->device->host); int i; unsigned long flags; #if 1 struct scsi_cmnd *connected, *disconnected_queue; #endif NCR5380_print_status (cmd->device->host); /* get in phase */ NCR5380_write( TARGET_COMMAND_REG, PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); /* assert RST */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); udelay (40); /* reset NCR registers */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); NCR5380_write( MODE_REG, MR_BASE ); NCR5380_write( TARGET_COMMAND_REG, 0 ); NCR5380_write( SELECT_ENABLE_REG, 0 ); /* ++roman: reset interrupt condition! otherwise no interrupts don't get * through anymore ... */ (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); #if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ /* XXX see below XXX */ /* MSch: old-style reset: actually abort all command processing here */ /* After the reset, there are no more connected or disconnected commands * and no busy units; to avoid problems with re-inserting the commands * into the issue_queue (via scsi_done()), the aborted commands are * remembered in local variables first. */ local_irq_save(flags); connected = (struct scsi_cmnd *)hostdata->connected; hostdata->connected = NULL; disconnected_queue = (struct scsi_cmnd *)hostdata->disconnected_queue; hostdata->disconnected_queue = NULL; #ifdef SUPPORT_TAGS free_all_tags(); #endif for( i = 0; i < 8; ++i ) hostdata->busy[i] = 0; #ifdef REAL_DMA hostdata->dma_len = 0; #endif local_irq_restore(flags); /* In order to tell the mid-level code which commands were aborted, * set the command status to DID_RESET and call scsi_done() !!! * This ultimately aborts processing of these commands in the mid-level. 
*/ if ((cmd = connected)) { ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done( cmd ); } for (i = 0; (cmd = disconnected_queue); ++i) { disconnected_queue = NEXT(cmd); SET_NEXT(cmd, NULL); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done( cmd ); } if (i > 0) ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); /* since all commands have been explicitly terminated, we need to tell * the midlevel code that the reset was SUCCESSFUL, and there is no * need to 'wake up' the commands by a request_sense */ return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; #else /* 1 */ /* MSch: new-style reset handling: let the mid-level do what it can */ /* ++guenther: MID-LEVEL IS STILL BROKEN. * Mid-level is supposed to requeue all commands that were active on the * various low-level queues. In fact it does this, but that's not enough * because all these commands are subject to timeout. And if a timeout * happens for any removed command, *_abort() is called but all queues * are now empty. Abort then gives up the falcon lock, which is fatal, * since the mid-level will queue more commands and must have the lock * (it's all happening inside timer interrupt handler!!). * Even worse, abort will return NOT_RUNNING for all those commands not * on any queue, so they won't be retried ... * * Conclusion: either scsi.c disables timeout for all resetted commands * immediately, or we lose! As of linux-2.0.20 it doesn't. */ /* After the reset, there are no more connected or disconnected commands * and no busy units; so clear the low-level status here to avoid * conflicts when the mid-level code tries to wake up the affected * commands! 
*/ if (hostdata->issue_queue) ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); if (hostdata->connected) ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); if (hostdata->disconnected_queue) ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); local_irq_save(flags); hostdata->issue_queue = NULL; hostdata->connected = NULL; hostdata->disconnected_queue = NULL; #ifdef SUPPORT_TAGS free_all_tags(); #endif for( i = 0; i < 8; ++i ) hostdata->busy[i] = 0; #ifdef REAL_DMA hostdata->dma_len = 0; #endif local_irq_restore(flags); /* we did no complete reset of all commands, so a wakeup is required */ return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; #endif /* 1 */ } /* Local Variables: */ /* tab-width: 8 */ /* End: */
gpl-2.0
alvinhochun/sony-nicki-ss-kernel-caf
arch/h8300/mm/kmap.c
6816
1228
/* * linux/arch/h8300/mm/kmap.c * * Based on * linux/arch/m68knommu/mm/kmap.c * * Copyright (C) 2000 Lineo, <davidm@snapgear.com> * Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com> */ #include <linux/mm.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/io.h> #undef DEBUG #define VIRT_OFFSET (0x01000000) /* * Map some physical address range into the kernel address space. */ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag) { return (void *)(physaddr + VIRT_OFFSET); } /* * Unmap a ioremap()ed region again. */ void iounmap(void *addr) { } /* * __iounmap unmaps nearly everything, so be careful * it doesn't free currently pointer/page tables anymore but it * wans't used anyway and might be added later. */ void __iounmap(void *addr, unsigned long size) { } /* * Set new cache mode for some kernel address space. * The caller must push data for that range itself, if such data may already * be in the cache. */ void kernel_set_cachemode(void *addr, unsigned long size, int cmode) { }
gpl-2.0
kannu1994/sgs2_kernel
drivers/ide/ide-pm.c
8352
6685
#include <linux/kernel.h> #include <linux/gfp.h> #include <linux/ide.h> int generic_ide_suspend(struct device *dev, pm_message_t mesg) { ide_drive_t *drive = dev_get_drvdata(dev); ide_drive_t *pair = ide_get_pair_dev(drive); ide_hwif_t *hwif = drive->hwif; struct request *rq; struct request_pm_state rqpm; int ret; if (ide_port_acpi(hwif)) { /* call ACPI _GTM only once */ if ((drive->dn & 1) == 0 || pair == NULL) ide_acpi_get_timing(hwif); } memset(&rqpm, 0, sizeof(rqpm)); rq = blk_get_request(drive->queue, READ, __GFP_WAIT); rq->cmd_type = REQ_TYPE_PM_SUSPEND; rq->special = &rqpm; rqpm.pm_step = IDE_PM_START_SUSPEND; if (mesg.event == PM_EVENT_PRETHAW) mesg.event = PM_EVENT_FREEZE; rqpm.pm_state = mesg.event; ret = blk_execute_rq(drive->queue, NULL, rq, 0); blk_put_request(rq); if (ret == 0 && ide_port_acpi(hwif)) { /* call ACPI _PS3 only after both devices are suspended */ if ((drive->dn & 1) || pair == NULL) ide_acpi_set_state(hwif, 0); } return ret; } int generic_ide_resume(struct device *dev) { ide_drive_t *drive = dev_get_drvdata(dev); ide_drive_t *pair = ide_get_pair_dev(drive); ide_hwif_t *hwif = drive->hwif; struct request *rq; struct request_pm_state rqpm; int err; if (ide_port_acpi(hwif)) { /* call ACPI _PS0 / _STM only once */ if ((drive->dn & 1) == 0 || pair == NULL) { ide_acpi_set_state(hwif, 1); ide_acpi_push_timing(hwif); } ide_acpi_exec_tfs(drive); } memset(&rqpm, 0, sizeof(rqpm)); rq = blk_get_request(drive->queue, READ, __GFP_WAIT); rq->cmd_type = REQ_TYPE_PM_RESUME; rq->cmd_flags |= REQ_PREEMPT; rq->special = &rqpm; rqpm.pm_step = IDE_PM_START_RESUME; rqpm.pm_state = PM_EVENT_ON; err = blk_execute_rq(drive->queue, NULL, rq, 1); blk_put_request(rq); if (err == 0 && dev->driver) { struct ide_driver *drv = to_ide_driver(dev->driver); if (drv->resume) drv->resume(drive); } return err; } void ide_complete_power_step(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->special; #ifdef DEBUG_PM printk(KERN_INFO "%s: 
complete_power_step(step: %d)\n", drive->name, pm->pm_step); #endif if (drive->media != ide_disk) return; switch (pm->pm_step) { case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */ if (pm->pm_state == PM_EVENT_FREEZE) pm->pm_step = IDE_PM_COMPLETED; else pm->pm_step = IDE_PM_STANDBY; break; case IDE_PM_STANDBY: /* Suspend step 2 (standby) */ pm->pm_step = IDE_PM_COMPLETED; break; case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */ pm->pm_step = IDE_PM_IDLE; break; case IDE_PM_IDLE: /* Resume step 2 (idle)*/ pm->pm_step = IDE_PM_RESTORE_DMA; break; } } ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->special; struct ide_cmd cmd = { }; switch (pm->pm_step) { case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */ if (drive->media != ide_disk) break; /* Not supported? Switch to next step now. */ if (ata_id_flush_enabled(drive->id) == 0 || (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) { ide_complete_power_step(drive, rq); return ide_stopped; } if (ata_id_flush_ext_enabled(drive->id)) cmd.tf.command = ATA_CMD_FLUSH_EXT; else cmd.tf.command = ATA_CMD_FLUSH; goto out_do_tf; case IDE_PM_STANDBY: /* Suspend step 2 (standby) */ cmd.tf.command = ATA_CMD_STANDBYNOW1; goto out_do_tf; case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */ ide_set_max_pio(drive); /* * skip IDE_PM_IDLE for ATAPI devices */ if (drive->media != ide_disk) pm->pm_step = IDE_PM_RESTORE_DMA; else ide_complete_power_step(drive, rq); return ide_stopped; case IDE_PM_IDLE: /* Resume step 2 (idle) */ cmd.tf.command = ATA_CMD_IDLEIMMEDIATE; goto out_do_tf; case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */ /* * Right now, all we do is call ide_set_dma(drive), * we could be smarter and check for current xfer_speed * in struct drive etc... 
*/ if (drive->hwif->dma_ops == NULL) break; /* * TODO: respect IDE_DFLAG_USING_DMA */ ide_set_dma(drive); break; } pm->pm_step = IDE_PM_COMPLETED; return ide_stopped; out_do_tf: cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; cmd.protocol = ATA_PROT_NODATA; return do_rw_taskfile(drive, &cmd); } /** * ide_complete_pm_rq - end the current Power Management request * @drive: target drive * @rq: request * * This function cleans up the current PM request and stops the queue * if necessary. */ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) { struct request_queue *q = drive->queue; struct request_pm_state *pm = rq->special; unsigned long flags; ide_complete_power_step(drive, rq); if (pm->pm_step != IDE_PM_COMPLETED) return; #ifdef DEBUG_PM printk("%s: completing PM request, %s\n", drive->name, (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume"); #endif spin_lock_irqsave(q->queue_lock, flags); if (rq->cmd_type == REQ_TYPE_PM_SUSPEND) blk_stop_queue(q); else drive->dev_flags &= ~IDE_DFLAG_BLOCKED; spin_unlock_irqrestore(q->queue_lock, flags); drive->hwif->rq = NULL; if (blk_end_request(rq, 0, 0)) BUG(); } void ide_check_pm_state(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->special; if (rq->cmd_type == REQ_TYPE_PM_SUSPEND && pm->pm_step == IDE_PM_START_SUSPEND) /* Mark drive blocked when starting the suspend sequence. */ drive->dev_flags |= IDE_DFLAG_BLOCKED; else if (rq->cmd_type == REQ_TYPE_PM_RESUME && pm->pm_step == IDE_PM_START_RESUME) { /* * The first thing we do on wakeup is to wait for BSY bit to * go away (with a looong timeout) as a drive on this hwif may * just be POSTing itself. * We do that before even selecting as the "other" device on * the bus may be broken enough to walk on our toes at this * point. 
*/ ide_hwif_t *hwif = drive->hwif; const struct ide_tp_ops *tp_ops = hwif->tp_ops; struct request_queue *q = drive->queue; unsigned long flags; int rc; #ifdef DEBUG_PM printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); #endif rc = ide_wait_not_busy(hwif, 35000); if (rc) printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); tp_ops->dev_select(drive); tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); rc = ide_wait_not_busy(hwif, 100000); if (rc) printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); spin_lock_irqsave(q->queue_lock, flags); blk_start_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); } }
gpl-2.0
ashwinr64/furnace-condor
sound/isa/gus/gus_io.c
14752
17906
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * I/O routines for GF1/InterWave synthesizer chips * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/time.h> #include <sound/core.h> #include <sound/gus.h> void snd_gf1_delay(struct snd_gus_card * gus) { int i; for (i = 0; i < 6; i++) { mb(); inb(GUSP(gus, DRAM)); } } /* * ======================================================================= */ /* * ok.. stop of control registers (wave & ramp) need some special things.. * big UltraClick (tm) elimination... 
*/ static inline void __snd_gf1_ctrl_stop(struct snd_gus_card * gus, unsigned char reg) { unsigned char value; outb(reg | 0x80, gus->gf1.reg_regsel); mb(); value = inb(gus->gf1.reg_data8); mb(); outb(reg, gus->gf1.reg_regsel); mb(); outb((value | 0x03) & ~(0x80 | 0x20), gus->gf1.reg_data8); mb(); } static inline void __snd_gf1_write8(struct snd_gus_card * gus, unsigned char reg, unsigned char data) { outb(reg, gus->gf1.reg_regsel); mb(); outb(data, gus->gf1.reg_data8); mb(); } static inline unsigned char __snd_gf1_look8(struct snd_gus_card * gus, unsigned char reg) { outb(reg, gus->gf1.reg_regsel); mb(); return inb(gus->gf1.reg_data8); } static inline void __snd_gf1_write16(struct snd_gus_card * gus, unsigned char reg, unsigned int data) { outb(reg, gus->gf1.reg_regsel); mb(); outw((unsigned short) data, gus->gf1.reg_data16); mb(); } static inline unsigned short __snd_gf1_look16(struct snd_gus_card * gus, unsigned char reg) { outb(reg, gus->gf1.reg_regsel); mb(); return inw(gus->gf1.reg_data16); } static inline void __snd_gf1_adlib_write(struct snd_gus_card * gus, unsigned char reg, unsigned char data) { outb(reg, gus->gf1.reg_timerctrl); inb(gus->gf1.reg_timerctrl); inb(gus->gf1.reg_timerctrl); outb(data, gus->gf1.reg_timerdata); inb(gus->gf1.reg_timerctrl); inb(gus->gf1.reg_timerctrl); } static inline void __snd_gf1_write_addr(struct snd_gus_card * gus, unsigned char reg, unsigned int addr, int w_16bit) { if (gus->gf1.enh_mode) { if (w_16bit) addr = ((addr >> 1) & ~0x0000000f) | (addr & 0x0000000f); __snd_gf1_write8(gus, SNDRV_GF1_VB_UPPER_ADDRESS, (unsigned char) ((addr >> 26) & 0x03)); } else if (w_16bit) addr = (addr & 0x00c0000f) | ((addr & 0x003ffff0) >> 1); __snd_gf1_write16(gus, reg, (unsigned short) (addr >> 11)); __snd_gf1_write16(gus, reg + 1, (unsigned short) (addr << 5)); } static inline unsigned int __snd_gf1_read_addr(struct snd_gus_card * gus, unsigned char reg, short w_16bit) { unsigned int res; res = ((unsigned int) __snd_gf1_look16(gus, reg | 
/* Tail of __snd_gf1_read_addr(): the function opens before this chunk.
 * Here the 24-bit voice address is assembled from the high/low register
 * halves and converted for 16-bit wave data when requested. */
0x80) << 11) & 0xfff800;
	res |= ((unsigned int) __snd_gf1_look16(gus, (reg + 1) | 0x80) >> 5) & 0x0007ff;
	if (gus->gf1.enh_mode) {
		/* enhanced (InterWave) mode carries extra upper address bits */
		res |= (unsigned int) __snd_gf1_look8(gus, SNDRV_GF1_VB_UPPER_ADDRESS | 0x80) << 26;
		if (w_16bit)
			res = ((res << 1) & 0xffffffe0) | (res & 0x0000000f);
	} else if (w_16bit)
		res = ((res & 0x001ffff0) << 1) | (res & 0x00c0000f);
	return res;
}

/*
 *  =======================================================================
 */

/* Unlocked wrappers around the __snd_gf1_* accessors.  Unlike the
 * snd_gf1_i_* variants below, these do NOT take gus->reg_lock, so the
 * caller is responsible for serializing register access. */

void snd_gf1_ctrl_stop(struct snd_gus_card * gus, unsigned char reg)
{
	__snd_gf1_ctrl_stop(gus, reg);
}

void snd_gf1_write8(struct snd_gus_card * gus, unsigned char reg, unsigned char data)
{
	__snd_gf1_write8(gus, reg, data);
}

unsigned char snd_gf1_look8(struct snd_gus_card * gus, unsigned char reg)
{
	return __snd_gf1_look8(gus, reg);
}

void snd_gf1_write16(struct snd_gus_card * gus, unsigned char reg, unsigned int data)
{
	__snd_gf1_write16(gus, reg, data);
}

unsigned short snd_gf1_look16(struct snd_gus_card * gus, unsigned char reg)
{
	return __snd_gf1_look16(gus, reg);
}

void snd_gf1_adlib_write(struct snd_gus_card * gus, unsigned char reg, unsigned char data)
{
	__snd_gf1_adlib_write(gus, reg, data);
}

void snd_gf1_write_addr(struct snd_gus_card * gus, unsigned char reg, unsigned int addr, short w_16bit)
{
	__snd_gf1_write_addr(gus, reg, addr, w_16bit);
}

unsigned int snd_gf1_read_addr(struct snd_gus_card * gus, unsigned char reg, short w_16bit)
{
	return __snd_gf1_read_addr(gus, reg, w_16bit);
}

/*
 * Locked (IRQ-safe) variants: identical to the wrappers above but
 * guarded by gus->reg_lock with interrupts disabled.
 */

void snd_gf1_i_ctrl_stop(struct snd_gus_card * gus, unsigned char reg)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	__snd_gf1_ctrl_stop(gus, reg);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}

void snd_gf1_i_write8(struct snd_gus_card * gus, unsigned char reg, unsigned char data)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	__snd_gf1_write8(gus, reg, data);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}

unsigned char snd_gf1_i_look8(struct snd_gus_card * gus, unsigned char reg)
{
	unsigned long flags;
	unsigned char res;

	spin_lock_irqsave(&gus->reg_lock, flags);
	res = __snd_gf1_look8(gus, reg);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	return res;
}

void snd_gf1_i_write16(struct snd_gus_card * gus, unsigned char reg, unsigned int data)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	__snd_gf1_write16(gus, reg, data);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}

unsigned short snd_gf1_i_look16(struct snd_gus_card * gus, unsigned char reg)
{
	unsigned long flags;
	unsigned short res;

	spin_lock_irqsave(&gus->reg_lock, flags);
	res = __snd_gf1_look16(gus, reg);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	return res;
}

#if 0

void snd_gf1_i_adlib_write(struct snd_gus_card * gus, unsigned char reg, unsigned char data)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	__snd_gf1_adlib_write(gus, reg, data);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}

void snd_gf1_i_write_addr(struct snd_gus_card * gus, unsigned char reg, unsigned int addr, short w_16bit)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	__snd_gf1_write_addr(gus, reg, addr, w_16bit);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}

#endif  /*  0  */

#ifdef CONFIG_SND_DEBUG
static unsigned int snd_gf1_i_read_addr(struct snd_gus_card * gus, unsigned char reg, short w_16bit)
{
	unsigned int res;
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	res = __snd_gf1_read_addr(gus, reg, w_16bit);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
	return res;
}
#endif

/*
 * Local DRAM access helpers.
 */

/* Latch a 24-bit local-memory address into the GF1 DRAM address
 * registers: low 16 bits via register 0x43, high 8 bits via 0x44.
 * No locking is done here. */
void snd_gf1_dram_addr(struct snd_gus_card * gus, unsigned int addr)
{
	outb(0x43, gus->gf1.reg_regsel);
	mb();		/* order the register-select / data writes */
	outw((unsigned short) addr, gus->gf1.reg_data16);
	mb();
	outb(0x44, gus->gf1.reg_regsel);
	mb();
	outb((unsigned char) (addr >> 16), gus->gf1.reg_data8);
	mb();
}

/* Write one byte to GUS local DRAM at a 24-bit address, under
 * gus->reg_lock (the body continues on the next chunk line). */
void snd_gf1_poke(struct snd_gus_card * gus, unsigned int addr, unsigned char data)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	outb(SNDRV_GF1_GW_DRAM_IO_LOW,
gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); outb(data, gus->gf1.reg_dram); spin_unlock_irqrestore(&gus->reg_lock, flags); } unsigned char snd_gf1_peek(struct snd_gus_card * gus, unsigned int addr) { unsigned long flags; unsigned char res; spin_lock_irqsave(&gus->reg_lock, flags); outb(SNDRV_GF1_GW_DRAM_IO_LOW, gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); res = inb(gus->gf1.reg_dram); spin_unlock_irqrestore(&gus->reg_lock, flags); return res; } #if 0 void snd_gf1_pokew(struct snd_gus_card * gus, unsigned int addr, unsigned short data) { unsigned long flags; #ifdef CONFIG_SND_DEBUG if (!gus->interwave) snd_printk(KERN_DEBUG "snd_gf1_pokew - GF1!!!\n"); #endif spin_lock_irqsave(&gus->reg_lock, flags); outb(SNDRV_GF1_GW_DRAM_IO_LOW, gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); outb(SNDRV_GF1_GW_DRAM_IO16, gus->gf1.reg_regsel); mb(); outw(data, gus->gf1.reg_data16); spin_unlock_irqrestore(&gus->reg_lock, flags); } unsigned short snd_gf1_peekw(struct snd_gus_card * gus, unsigned int addr) { unsigned long flags; unsigned short res; #ifdef CONFIG_SND_DEBUG if (!gus->interwave) snd_printk(KERN_DEBUG "snd_gf1_peekw - GF1!!!\n"); #endif spin_lock_irqsave(&gus->reg_lock, flags); outb(SNDRV_GF1_GW_DRAM_IO_LOW, gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); outb(SNDRV_GF1_GW_DRAM_IO16, gus->gf1.reg_regsel); mb(); res = inw(gus->gf1.reg_data16); 
spin_unlock_irqrestore(&gus->reg_lock, flags); return res; } void snd_gf1_dram_setmem(struct snd_gus_card * gus, unsigned int addr, unsigned short value, unsigned int count) { unsigned long port; unsigned long flags; #ifdef CONFIG_SND_DEBUG if (!gus->interwave) snd_printk(KERN_DEBUG "snd_gf1_dram_setmem - GF1!!!\n"); #endif addr &= ~1; count >>= 1; port = GUSP(gus, GF1DATALOW); spin_lock_irqsave(&gus->reg_lock, flags); outb(SNDRV_GF1_GW_DRAM_IO_LOW, gus->gf1.reg_regsel); mb(); outw((unsigned short) addr, gus->gf1.reg_data16); mb(); outb(SNDRV_GF1_GB_DRAM_IO_HIGH, gus->gf1.reg_regsel); mb(); outb((unsigned char) (addr >> 16), gus->gf1.reg_data8); mb(); outb(SNDRV_GF1_GW_DRAM_IO16, gus->gf1.reg_regsel); while (count--) outw(value, port); spin_unlock_irqrestore(&gus->reg_lock, flags); } #endif /* 0 */ void snd_gf1_select_active_voices(struct snd_gus_card * gus) { unsigned short voices; static unsigned short voices_tbl[32 - 14 + 1] = { 44100, 41160, 38587, 36317, 34300, 32494, 30870, 29400, 28063, 26843, 25725, 24696, 23746, 22866, 22050, 21289, 20580, 19916, 19293 }; voices = gus->gf1.active_voices; if (voices > 32) voices = 32; if (voices < 14) voices = 14; if (gus->gf1.enh_mode) voices = 32; gus->gf1.active_voices = voices; gus->gf1.playback_freq = gus->gf1.enh_mode ? 
44100 : voices_tbl[voices - 14]; if (!gus->gf1.enh_mode) { snd_gf1_i_write8(gus, SNDRV_GF1_GB_ACTIVE_VOICES, 0xc0 | (voices - 1)); udelay(100); } } #ifdef CONFIG_SND_DEBUG void snd_gf1_print_voice_registers(struct snd_gus_card * gus) { unsigned char mode; int voice, ctrl; voice = gus->gf1.active_voice; printk(KERN_INFO " -%i- GF1 voice ctrl, ramp ctrl = 0x%x, 0x%x\n", voice, ctrl = snd_gf1_i_read8(gus, 0), snd_gf1_i_read8(gus, 0x0d)); printk(KERN_INFO " -%i- GF1 frequency = 0x%x\n", voice, snd_gf1_i_read16(gus, 1)); printk(KERN_INFO " -%i- GF1 loop start, end = 0x%x (0x%x), 0x%x (0x%x)\n", voice, snd_gf1_i_read_addr(gus, 2, ctrl & 4), snd_gf1_i_read_addr(gus, 2, (ctrl & 4) ^ 4), snd_gf1_i_read_addr(gus, 4, ctrl & 4), snd_gf1_i_read_addr(gus, 4, (ctrl & 4) ^ 4)); printk(KERN_INFO " -%i- GF1 ramp start, end, rate = 0x%x, 0x%x, 0x%x\n", voice, snd_gf1_i_read8(gus, 7), snd_gf1_i_read8(gus, 8), snd_gf1_i_read8(gus, 6)); printk(KERN_INFO" -%i- GF1 volume = 0x%x\n", voice, snd_gf1_i_read16(gus, 9)); printk(KERN_INFO " -%i- GF1 position = 0x%x (0x%x)\n", voice, snd_gf1_i_read_addr(gus, 0x0a, ctrl & 4), snd_gf1_i_read_addr(gus, 0x0a, (ctrl & 4) ^ 4)); if (gus->interwave && snd_gf1_i_read8(gus, 0x19) & 0x01) { /* enhanced mode */ mode = snd_gf1_i_read8(gus, 0x15); printk(KERN_INFO " -%i- GFA1 mode = 0x%x\n", voice, mode); if (mode & 0x01) { /* Effect processor */ printk(KERN_INFO " -%i- GFA1 effect address = 0x%x\n", voice, snd_gf1_i_read_addr(gus, 0x11, ctrl & 4)); printk(KERN_INFO " -%i- GFA1 effect volume = 0x%x\n", voice, snd_gf1_i_read16(gus, 0x16)); printk(KERN_INFO " -%i- GFA1 effect volume final = 0x%x\n", voice, snd_gf1_i_read16(gus, 0x1d)); printk(KERN_INFO " -%i- GFA1 effect acumulator = 0x%x\n", voice, snd_gf1_i_read8(gus, 0x14)); } if (mode & 0x20) { printk(KERN_INFO " -%i- GFA1 left offset = 0x%x (%i)\n", voice, snd_gf1_i_read16(gus, 0x13), snd_gf1_i_read16(gus, 0x13) >> 4); printk(KERN_INFO " -%i- GFA1 left offset final = 0x%x (%i)\n", voice, 
snd_gf1_i_read16(gus, 0x1c), snd_gf1_i_read16(gus, 0x1c) >> 4); printk(KERN_INFO " -%i- GFA1 right offset = 0x%x (%i)\n", voice, snd_gf1_i_read16(gus, 0x0c), snd_gf1_i_read16(gus, 0x0c) >> 4); printk(KERN_INFO " -%i- GFA1 right offset final = 0x%x (%i)\n", voice, snd_gf1_i_read16(gus, 0x1b), snd_gf1_i_read16(gus, 0x1b) >> 4); } else printk(KERN_INFO " -%i- GF1 pan = 0x%x\n", voice, snd_gf1_i_read8(gus, 0x0c)); } else printk(KERN_INFO " -%i- GF1 pan = 0x%x\n", voice, snd_gf1_i_read8(gus, 0x0c)); } #if 0 void snd_gf1_print_global_registers(struct snd_gus_card * gus) { unsigned char global_mode = 0x00; printk(KERN_INFO " -G- GF1 active voices = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_ACTIVE_VOICES)); if (gus->interwave) { global_mode = snd_gf1_i_read8(gus, SNDRV_GF1_GB_GLOBAL_MODE); printk(KERN_INFO " -G- GF1 global mode = 0x%x\n", global_mode); } if (global_mode & 0x02) /* LFO enabled? */ printk(KERN_INFO " -G- GF1 LFO base = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_LFO_BASE)); printk(KERN_INFO " -G- GF1 voices IRQ read = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_VOICES_IRQ_READ)); printk(KERN_INFO " -G- GF1 DRAM DMA control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_DRAM_DMA_CONTROL)); printk(KERN_INFO " -G- GF1 DRAM DMA high/low = 0x%x/0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_DRAM_DMA_HIGH), snd_gf1_i_read16(gus, SNDRV_GF1_GW_DRAM_DMA_LOW)); printk(KERN_INFO " -G- GF1 DRAM IO high/low = 0x%x/0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_DRAM_IO_HIGH), snd_gf1_i_read16(gus, SNDRV_GF1_GW_DRAM_IO_LOW)); if (!gus->interwave) printk(KERN_INFO " -G- GF1 record DMA control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_REC_DMA_CONTROL)); printk(KERN_INFO " -G- GF1 DRAM IO 16 = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_DRAM_IO16)); if (gus->gf1.enh_mode) { printk(KERN_INFO " -G- GFA1 memory config = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG)); printk(KERN_INFO " -G- GFA1 memory control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_MEMORY_CONTROL)); 
printk(KERN_INFO " -G- GFA1 FIFO record base = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_FIFO_RECORD_BASE_ADDR)); printk(KERN_INFO " -G- GFA1 FIFO playback base = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_FIFO_PLAY_BASE_ADDR)); printk(KERN_INFO " -G- GFA1 interleave control = 0x%x\n", snd_gf1_i_look16(gus, SNDRV_GF1_GW_INTERLEAVE)); } } void snd_gf1_print_setup_registers(struct snd_gus_card * gus) { printk(KERN_INFO " -S- mix control = 0x%x\n", inb(GUSP(gus, MIXCNTRLREG))); printk(KERN_INFO " -S- IRQ status = 0x%x\n", inb(GUSP(gus, IRQSTAT))); printk(KERN_INFO " -S- timer control = 0x%x\n", inb(GUSP(gus, TIMERCNTRL))); printk(KERN_INFO " -S- timer data = 0x%x\n", inb(GUSP(gus, TIMERDATA))); printk(KERN_INFO " -S- status read = 0x%x\n", inb(GUSP(gus, REGCNTRLS))); printk(KERN_INFO " -S- Sound Blaster control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL)); printk(KERN_INFO " -S- AdLib timer 1/2 = 0x%x/0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_ADLIB_TIMER_1), snd_gf1_i_look8(gus, SNDRV_GF1_GB_ADLIB_TIMER_2)); printk(KERN_INFO " -S- reset = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)); if (gus->interwave) { printk(KERN_INFO " -S- compatibility = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_COMPATIBILITY)); printk(KERN_INFO " -S- decode control = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_DECODE_CONTROL)); printk(KERN_INFO " -S- version number = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_VERSION_NUMBER)); printk(KERN_INFO " -S- MPU-401 emul. control A/B = 0x%x/0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_MPU401_CONTROL_A), snd_gf1_i_look8(gus, SNDRV_GF1_GB_MPU401_CONTROL_B)); printk(KERN_INFO " -S- emulation IRQ = 0x%x\n", snd_gf1_i_look8(gus, SNDRV_GF1_GB_EMULATION_IRQ)); } } void snd_gf1_peek_print_block(struct snd_gus_card * gus, unsigned int addr, int count, int w_16bit) { if (!w_16bit) { while (count-- > 0) printk(count > 0 ? "%02x:" : "%02x", snd_gf1_peek(gus, addr++)); } else { while (count-- > 0) { printk(count > 0 ? 
"%04x:" : "%04x", snd_gf1_peek(gus, addr) | (snd_gf1_peek(gus, addr + 1) << 8)); addr += 2; } } } #endif /* 0 */ #endif
gpl-2.0
amitsirius/linux-3.2.71_sirius
arch/arm/mach-ixp4xx/omixp-setup.c
161
6163
/*
 * arch/arm/mach-ixp4xx/omixp-setup.c
 *
 * omicron ixp4xx board setup
 * Copyright (C) 2009 OMICRON electronics GmbH
 *
 * based nslu2-setup.c, ixdp425-setup.c:
 * Copyright (C) 2003-2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#ifdef CONFIG_LEDS_CLASS
#include <linux/leds.h>
#endif

#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>

/* Two CFI flash banks; the start/end addresses are filled in at runtime
 * in omixp_init() from the expansion-bus window macros. */
static struct resource omixp_flash_resources[] = {
	{
		.flags	= IORESOURCE_MEM,
	}, {
		.flags	= IORESOURCE_MEM,
	},
};

/* Fixed MTD layout of the boot flash (bank 0). */
static struct mtd_partition omixp_partitions[] = {
	{
		.name	= "Recovery Bootloader",
		.size	= 0x00020000,
		.offset	= 0,
	}, {
		.name	= "Calibration Data",
		.size	= 0x00020000,
		.offset	= 0x00020000,
	}, {
		.name	= "Recovery FPGA",
		.size	= 0x00020000,
		.offset	= 0x00040000,
	}, {
		.name	= "Release Bootloader",
		.size	= 0x00020000,
		.offset	= 0x00060000,
	}, {
		.name	= "Release FPGA",
		.size	= 0x00020000,
		.offset	= 0x00080000,
	}, {
		.name	= "Kernel",
		.size	= 0x00160000,
		.offset	= 0x000a0000,
	}, {
		.name	= "Filesystem",
		.size	= 0x00C00000,
		.offset	= 0x00200000,
	}, {
		.name	= "Persistent Storage",
		.size	= 0x00200000,
		.offset	= 0x00E00000,
	},
};

static struct flash_platform_data omixp_flash_data[] = {
	{
		/* bank 0: boot flash with the fixed partition table above */
		.map_name	= "cfi_probe",
		.parts		= omixp_partitions,
		.nr_parts	= ARRAY_SIZE(omixp_partitions),
	}, {
		/* bank 1: data flash, no static partitioning */
		.map_name	= "cfi_probe",
		.parts		= NULL,
		.nr_parts	= 0,
	},
};

static struct platform_device omixp_flash_device[] = {
	{
		.name		= "IXP4XX-Flash",
		.id		= 0,
		.dev		= {
			.platform_data = &omixp_flash_data[0],
		},
		.resource	= &omixp_flash_resources[0],
		.num_resources	= 1,
	}, {
		.name		= "IXP4XX-Flash",
		.id		= 1,
		.dev		= {
			.platform_data = &omixp_flash_data[1],
		},
		.resource	= &omixp_flash_resources[1],
		.num_resources	= 1,
	},
};

/* Swap UART's - These boards have the console on UART2. The following
 * configuration is used:
 *	ttyS0 .. UART2
 *	ttyS1 .. UART1
 * This way standard images can be used with the kernel that expect
 * the console on ttyS0.
 */
static struct resource omixp_uart_resources[] = {
	{
		.start	= IXP4XX_UART2_BASE_PHYS,
		.end	= IXP4XX_UART2_BASE_PHYS + 0x0fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IXP4XX_UART1_BASE_PHYS,
		.end	= IXP4XX_UART1_BASE_PHYS + 0x0fff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct plat_serial8250_port omixp_uart_data[] = {
	{
		.mapbase	= IXP4XX_UART2_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART2,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	}, {
		.mapbase	= IXP4XX_UART1_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART1,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	}, {
		/* list termination */
	}
};

static struct platform_device omixp_uart = {
	.name			= "serial8250",
	.id			= PLAT8250_DEV_PLATFORM,
	.dev.platform_data	= omixp_uart_data,
	.num_resources		= 2,
	.resource		= omixp_uart_resources,
};

/* Single board LED on GPIO 7 (MIC256 variant only). */
static struct gpio_led mic256_led_pins[] = {
	{
		.name	= "LED-A",
		.gpio	= 7,
	},
};

static struct gpio_led_platform_data mic256_led_data = {
	.num_leds	= ARRAY_SIZE(mic256_led_pins),
	.leds		= mic256_led_pins,
};

static struct platform_device mic256_leds = {
	.name			= "leds-gpio",
	.id			= -1,
	.dev.platform_data	= &mic256_led_data,
};

/* Built-in 10/100 Ethernet MAC interfaces */
static struct eth_plat_info ixdp425_plat_eth[] = {
	{
		.phy		= 0,
		.rxq		= 3,
		.txreadyq	= 20,
	}, {
		.phy		= 1,
		.rxq		= 4,
		.txreadyq	= 21,
	},
};

static struct platform_device ixdp425_eth[] = {
	{
		.name			= "ixp4xx_eth",
		.id			= IXP4XX_ETH_NPEB,
		.dev.platform_data	= ixdp425_plat_eth,
	}, {
		.name			= "ixp4xx_eth",
		.id			= IXP4XX_ETH_NPEC,
		.dev.platform_data	= ixdp425_plat_eth + 1,
	},
};

/* Per-board device lists; the board variant is selected at runtime
 * in omixp_init() via machine_is_*(). */
static struct platform_device *devixp_pldev[] __initdata = {
	&omixp_uart,
	&omixp_flash_device[0],
	&ixdp425_eth[0],
	&ixdp425_eth[1],
};

static struct platform_device *mic256_pldev[] __initdata = {
	&omixp_uart,
	&omixp_flash_device[0],
	&mic256_leds,
	&ixdp425_eth[0],
	&ixdp425_eth[1],
};

static struct platform_device *miccpt_pldev[] __initdata = {
	&omixp_uart,
	&omixp_flash_device[0],
	&omixp_flash_device[1],
	&ixdp425_eth[0],
	&ixdp425_eth[1],
};

/* Board init: fix up the flash resource windows and register the device
 * list matching the detected machine type. */
static void __init omixp_init(void)
{
	ixp4xx_sys_init();

	/* 16MiB Boot Flash */
	omixp_flash_resources[0].start = IXP4XX_EXP_BUS_BASE(0);
	omixp_flash_resources[0].end = IXP4XX_EXP_BUS_END(0);

	/* 32 MiB Data Flash */
	omixp_flash_resources[1].start = IXP4XX_EXP_BUS_BASE(2);
	omixp_flash_resources[1].end = IXP4XX_EXP_BUS_END(2);

	if (machine_is_devixp())
		platform_add_devices(devixp_pldev, ARRAY_SIZE(devixp_pldev));
	else if (machine_is_miccpt())
		platform_add_devices(miccpt_pldev, ARRAY_SIZE(miccpt_pldev));
	else if (machine_is_mic256())
		platform_add_devices(mic256_pldev, ARRAY_SIZE(mic256_pldev));
}

#ifdef CONFIG_MACH_DEVIXP
MACHINE_START(DEVIXP, "Omicron DEVIXP")
	.atag_offset	= 0x100,
	.map_io		= ixp4xx_map_io,
	.init_irq	= ixp4xx_init_irq,
	.timer		= &ixp4xx_timer,
	.init_machine	= omixp_init,
MACHINE_END
#endif

#ifdef CONFIG_MACH_MICCPT
MACHINE_START(MICCPT, "Omicron MICCPT")
	.atag_offset	= 0x100,
	.map_io		= ixp4xx_map_io,
	.init_irq	= ixp4xx_init_irq,
	.timer		= &ixp4xx_timer,
	.init_machine	= omixp_init,
#if defined(CONFIG_PCI)
	.dma_zone_size	= SZ_64M,
#endif
MACHINE_END
#endif

#ifdef CONFIG_MACH_MIC256
MACHINE_START(MIC256, "Omicron MIC256")
	.atag_offset	= 0x100,
	.map_io		= ixp4xx_map_io,
	.init_irq	= ixp4xx_init_irq,
	.timer		= &ixp4xx_timer,
	.init_machine	= omixp_init,
MACHINE_END
#endif
gpl-2.0
CalcProgrammer1/archos-gen8-kernel-2.6.37
arch/cris/arch-v32/drivers/sync_serial.c
161
45445
/* * Simple synchronous serial port driver for ETRAX FS and Artpec-3. * * Copyright (c) 2005 Axis Communications AB * * Author: Mikael Starvik * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/major.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/spinlock.h> #include <asm/io.h> #include <dma.h> #include <pinmux.h> #include <hwregs/reg_rdwr.h> #include <hwregs/sser_defs.h> #include <hwregs/dma_defs.h> #include <hwregs/dma.h> #include <hwregs/intr_vect_defs.h> #include <hwregs/intr_vect.h> #include <hwregs/reg_map.h> #include <asm/sync_serial.h> /* The receiver is a bit tricky beacuse of the continuous stream of data.*/ /* */ /* Three DMA descriptors are linked together. Each DMA descriptor is */ /* responsible for port->bufchunk of a common buffer. */ /* */ /* +---------------------------------------------+ */ /* | +----------+ +----------+ +----------+ | */ /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */ /* +----------+ +----------+ +----------+ */ /* | | | */ /* v v v */ /* +-------------------------------------+ */ /* | BUFFER | */ /* +-------------------------------------+ */ /* |<- data_avail ->| */ /* readp writep */ /* */ /* If the application keeps up the pace readp will be right after writep.*/ /* If the application can't keep the pace we have to throw away data. */ /* The idea is that readp should be ready with the data pointed out by */ /* Descr[i] when the DMA has filled in Descr[i+1]. 
*/ /* Otherwise we will discard */ /* the rest of the data pointed out by Descr1 and set readp to the start */ /* of Descr2 */ #define SYNC_SERIAL_MAJOR 125 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */ /* words can be handled */ #define IN_BUFFER_SIZE 12288 #define IN_DESCR_SIZE 256 #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE) #define OUT_BUFFER_SIZE 1024*8 #define NBR_OUT_DESCR 8 #define DEFAULT_FRAME_RATE 0 #define DEFAULT_WORD_RATE 7 /* NOTE: Enabling some debug will likely cause overrun or underrun, * especially if manual mode is use. */ #define DEBUG(x) #define DEBUGREAD(x) #define DEBUGWRITE(x) #define DEBUGPOLL(x) #define DEBUGRXINT(x) #define DEBUGTXINT(x) #define DEBUGTRDMA(x) #define DEBUGOUTBUF(x) typedef struct sync_port { reg_scope_instances regi_sser; reg_scope_instances regi_dmain; reg_scope_instances regi_dmaout; char started; /* 1 if port has been started */ char port_nbr; /* Port 0 or 1 */ char busy; /* 1 if port is busy */ char enabled; /* 1 if port is enabled */ char use_dma; /* 1 if port uses dma */ char tr_running; char init_irqs; int output; int input; /* Next byte to be read by application */ volatile unsigned char *volatile readp; /* Next byte to be written by etrax */ volatile unsigned char *volatile writep; unsigned int in_buffer_size; unsigned int inbufchunk; unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32))); unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32))); unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32))); struct dma_descr_data* next_rx_desc; struct dma_descr_data* prev_rx_desc; /* Pointer to the first available descriptor in the ring, * unless active_tr_descr == catch_tr_descr and a dma * transfer is active */ struct dma_descr_data *active_tr_descr; /* Pointer to the first allocated descriptor in the ring */ struct dma_descr_data *catch_tr_descr; /* Pointer to the descriptor with the current end-of-list */ struct dma_descr_data *prev_tr_descr; 
int full; /* Pointer to the first byte being read by DMA * or current position in out_buffer if not using DMA. */ unsigned char *out_rd_ptr; /* Number of bytes currently locked for being read by DMA */ int out_buf_count; dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16))); dma_descr_context in_context __attribute__ ((__aligned__(32))); dma_descr_data out_descr[NBR_OUT_DESCR] __attribute__ ((__aligned__(16))); dma_descr_context out_context __attribute__ ((__aligned__(32))); wait_queue_head_t out_wait_q; wait_queue_head_t in_wait_q; spinlock_t lock; } sync_port; static DEFINE_MUTEX(sync_serial_mutex); static int etrax_sync_serial_init(void); static void initialize_port(int portnbr); static inline int sync_data_avail(struct sync_port *port); static int sync_serial_open(struct inode *, struct file*); static int sync_serial_release(struct inode*, struct file*); static unsigned int sync_serial_poll(struct file *filp, poll_table *wait); static int sync_serial_ioctl(struct file *, unsigned int cmd, unsigned long arg); static ssize_t sync_serial_write(struct file * file, const char * buf, size_t count, loff_t *ppos); static ssize_t sync_serial_read(struct file *file, char *buf, size_t count, loff_t *ppos); #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)) #define SYNC_SER_DMA #endif static void send_word(sync_port* port); static void start_dma_out(struct sync_port *port, const char *data, int count); static void start_dma_in(sync_port* port); #ifdef SYNC_SER_DMA static irqreturn_t tr_interrupt(int irq, void *dev_id); static irqreturn_t rx_interrupt(int irq, void *dev_id); #endif #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)) #define 
SYNC_SER_MANUAL #endif #ifdef SYNC_SER_MANUAL static irqreturn_t manual_interrupt(int irq, void *dev_id); #endif #ifdef CONFIG_ETRAXFS /* ETRAX FS */ #define OUT_DMA_NBR 4 #define IN_DMA_NBR 5 #define PINMUX_SSER pinmux_sser0 #define SYNCSER_INST regi_sser0 #define SYNCSER_INTR_VECT SSER0_INTR_VECT #define OUT_DMA_INST regi_dma4 #define IN_DMA_INST regi_dma5 #define DMA_OUT_INTR_VECT DMA4_INTR_VECT #define DMA_IN_INTR_VECT DMA5_INTR_VECT #define REQ_DMA_SYNCSER dma_sser0 #else /* Artpec-3 */ #define OUT_DMA_NBR 6 #define IN_DMA_NBR 7 #define PINMUX_SSER pinmux_sser #define SYNCSER_INST regi_sser #define SYNCSER_INTR_VECT SSER_INTR_VECT #define OUT_DMA_INST regi_dma6 #define IN_DMA_INST regi_dma7 #define DMA_OUT_INTR_VECT DMA6_INTR_VECT #define DMA_IN_INTR_VECT DMA7_INTR_VECT #define REQ_DMA_SYNCSER dma_sser #endif /* The ports */ static struct sync_port ports[]= { { .regi_sser = SYNCSER_INST, .regi_dmaout = OUT_DMA_INST, .regi_dmain = IN_DMA_INST, #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) .use_dma = 1, #else .use_dma = 0, #endif } #ifdef CONFIG_ETRAXFS , { .regi_sser = regi_sser1, .regi_dmaout = regi_dma6, .regi_dmain = regi_dma7, #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA) .use_dma = 1, #else .use_dma = 0, #endif } #endif }; #define NBR_PORTS ARRAY_SIZE(ports) static const struct file_operations sync_serial_fops = { .owner = THIS_MODULE, .write = sync_serial_write, .read = sync_serial_read, .poll = sync_serial_poll, .unlocked_ioctl = sync_serial_ioctl, .open = sync_serial_open, .release = sync_serial_release, .llseek = noop_llseek, }; static int __init etrax_sync_serial_init(void) { ports[0].enabled = 0; #ifdef CONFIG_ETRAXFS ports[1].enabled = 0; #endif if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial", &sync_serial_fops) < 0) { printk(KERN_WARNING "Unable to get major for synchronous serial port\n"); return -EBUSY; } /* Initialize Ports */ #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) { 
printk(KERN_WARNING "Unable to alloc pins for synchronous serial port 0\n"); return -EIO; } ports[0].enabled = 1; initialize_port(0); #endif #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) { printk(KERN_WARNING "Unable to alloc pins for synchronous serial port 0\n"); return -EIO; } ports[1].enabled = 1; initialize_port(1); #endif #ifdef CONFIG_ETRAXFS printk(KERN_INFO "ETRAX FS synchronous serial port driver\n"); #else printk(KERN_INFO "Artpec-3 synchronous serial port driver\n"); #endif return 0; } static void __init initialize_port(int portnbr) { int __attribute__((unused)) i; struct sync_port *port = &ports[portnbr]; reg_sser_rw_cfg cfg = {0}; reg_sser_rw_frm_cfg frm_cfg = {0}; reg_sser_rw_tr_cfg tr_cfg = {0}; reg_sser_rw_rec_cfg rec_cfg = {0}; DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr)); port->port_nbr = portnbr; port->init_irqs = 1; port->out_rd_ptr = port->out_buffer; port->out_buf_count = 0; port->output = 1; port->input = 0; port->readp = port->flip; port->writep = port->flip; port->in_buffer_size = IN_BUFFER_SIZE; port->inbufchunk = IN_DESCR_SIZE; port->next_rx_desc = &port->in_descr[0]; port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1]; port->prev_rx_desc->eol = 1; init_waitqueue_head(&port->out_wait_q); init_waitqueue_head(&port->in_wait_q); spin_lock_init(&port->lock); cfg.out_clk_src = regk_sser_intern_clk; cfg.out_clk_pol = regk_sser_pos; cfg.clk_od_mode = regk_sser_no; cfg.clk_dir = regk_sser_out; cfg.gate_clk = regk_sser_no; cfg.base_freq = regk_sser_f29_493; cfg.clk_div = 256; REG_WR(sser, port->regi_sser, rw_cfg, cfg); frm_cfg.wordrate = DEFAULT_WORD_RATE; frm_cfg.type = regk_sser_edge; frm_cfg.frame_pin_dir = regk_sser_out; frm_cfg.frame_pin_use = regk_sser_frm; frm_cfg.status_pin_dir = regk_sser_in; frm_cfg.status_pin_use = regk_sser_hold; frm_cfg.out_on = regk_sser_tr; frm_cfg.tr_delay = 1; REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg); tr_cfg.urun_stop = regk_sser_no; 
tr_cfg.sample_size = 7; tr_cfg.sh_dir = regk_sser_msbfirst; tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; #if 0 tr_cfg.rate_ctrl = regk_sser_bulk; tr_cfg.data_pin_use = regk_sser_dout; #else tr_cfg.rate_ctrl = regk_sser_iso; tr_cfg.data_pin_use = regk_sser_dout; #endif tr_cfg.bulk_wspace = 1; REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); rec_cfg.sample_size = 7; rec_cfg.sh_dir = regk_sser_msbfirst; rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; rec_cfg.fifo_thr = regk_sser_inf; REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); #ifdef SYNC_SER_DMA /* Setup the descriptor ring for dma out/transmit. */ for (i = 0; i < NBR_OUT_DESCR; i++) { port->out_descr[i].wait = 0; port->out_descr[i].intr = 1; port->out_descr[i].eol = 0; port->out_descr[i].out_eop = 0; port->out_descr[i].next = (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]); } /* Create a ring from the list. */ port->out_descr[NBR_OUT_DESCR-1].next = (dma_descr_data *)virt_to_phys(&port->out_descr[0]); /* Setup context for traversing the ring. 
*/ port->active_tr_descr = &port->out_descr[0]; port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1]; port->catch_tr_descr = &port->out_descr[0]; #endif } static inline int sync_data_avail(struct sync_port *port) { int avail; unsigned char *start; unsigned char *end; start = (unsigned char*)port->readp; /* cast away volatile */ end = (unsigned char*)port->writep; /* cast away volatile */ /* 0123456789 0123456789 * ----- - ----- * ^rp ^wp ^wp ^rp */ if (end >= start) avail = end - start; else avail = port->in_buffer_size - (start - end); return avail; } static inline int sync_data_avail_to_end(struct sync_port *port) { int avail; unsigned char *start; unsigned char *end; start = (unsigned char*)port->readp; /* cast away volatile */ end = (unsigned char*)port->writep; /* cast away volatile */ /* 0123456789 0123456789 * ----- ----- * ^rp ^wp ^wp ^rp */ if (end >= start) avail = end - start; else avail = port->flip + port->in_buffer_size - start; return avail; } static int sync_serial_open(struct inode *inode, struct file *file) { int dev = iminor(inode); int ret = -EBUSY; sync_port *port; reg_dma_rw_cfg cfg = {.en = regk_dma_yes}; reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes}; mutex_lock(&sync_serial_mutex); DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev)); if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev)); ret = -ENODEV; goto out; } port = &ports[dev]; /* Allow open this device twice (assuming one reader and one writer) */ if (port->busy == 2) { DEBUG(printk(KERN_DEBUG "Device is busy.. 
\n")); goto out; } if (port->init_irqs) { if (port->use_dma) { if (port == &ports[0]) { #ifdef SYNC_SER_DMA if (request_irq(DMA_OUT_INTR_VECT, tr_interrupt, 0, "synchronous serial 0 dma tr", &ports[0])) { printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); goto out; } else if (request_irq(DMA_IN_INTR_VECT, rx_interrupt, 0, "synchronous serial 1 dma rx", &ports[0])) { free_irq(DMA_OUT_INTR_VECT, &port[0]); printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); goto out; } else if (crisv32_request_dma(OUT_DMA_NBR, "synchronous serial 0 dma tr", DMA_VERBOSE_ON_ERROR, 0, REQ_DMA_SYNCSER)) { free_irq(DMA_OUT_INTR_VECT, &port[0]); free_irq(DMA_IN_INTR_VECT, &port[0]); printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel"); goto out; } else if (crisv32_request_dma(IN_DMA_NBR, "synchronous serial 0 dma rec", DMA_VERBOSE_ON_ERROR, 0, REQ_DMA_SYNCSER)) { crisv32_free_dma(OUT_DMA_NBR); free_irq(DMA_OUT_INTR_VECT, &port[0]); free_irq(DMA_IN_INTR_VECT, &port[0]); printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel"); goto out; } #endif } #ifdef CONFIG_ETRAXFS else if (port == &ports[1]) { #ifdef SYNC_SER_DMA if (request_irq(DMA6_INTR_VECT, tr_interrupt, 0, "synchronous serial 1 dma tr", &ports[1])) { printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ"); goto out; } else if (request_irq(DMA7_INTR_VECT, rx_interrupt, 0, "synchronous serial 1 dma rx", &ports[1])) { free_irq(DMA6_INTR_VECT, &ports[1]); printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ"); goto out; } else if (crisv32_request_dma( SYNC_SER1_TX_DMA_NBR, "synchronous serial 1 dma tr", DMA_VERBOSE_ON_ERROR, 0, dma_sser1)) { free_irq(DMA6_INTR_VECT, &ports[1]); free_irq(DMA7_INTR_VECT, &ports[1]); printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel"); goto out; } else if (crisv32_request_dma( SYNC_SER1_RX_DMA_NBR, "synchronous serial 3 dma rec", DMA_VERBOSE_ON_ERROR, 0, dma_sser1)) { crisv32_free_dma(SYNC_SER1_TX_DMA_NBR); free_irq(DMA6_INTR_VECT, 
&ports[1]); free_irq(DMA7_INTR_VECT, &ports[1]); printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel"); goto out; } #endif } #endif /* Enable DMAs */ REG_WR(dma, port->regi_dmain, rw_cfg, cfg); REG_WR(dma, port->regi_dmaout, rw_cfg, cfg); /* Enable DMA IRQs */ REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask); REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask); /* Set up wordsize = 1 for DMAs. */ DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1); DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1); start_dma_in(port); port->init_irqs = 0; } else { /* !port->use_dma */ #ifdef SYNC_SER_MANUAL if (port == &ports[0]) { if (request_irq(SYNCSER_INTR_VECT, manual_interrupt, 0, "synchronous serial manual irq", &ports[0])) { printk("Can't allocate sync serial manual irq"); goto out; } } #ifdef CONFIG_ETRAXFS else if (port == &ports[1]) { if (request_irq(SSER1_INTR_VECT, manual_interrupt, 0, "synchronous serial manual irq", &ports[1])) { printk(KERN_CRIT "Can't allocate sync serial manual irq"); goto out; } } #endif port->init_irqs = 0; #else panic("sync_serial: Manual mode not supported.\n"); #endif /* SYNC_SER_MANUAL */ } } /* port->init_irqs */ port->busy++; ret = 0; out: mutex_unlock(&sync_serial_mutex); return ret; } static int sync_serial_release(struct inode *inode, struct file *file) { int dev = iminor(inode); sync_port *port; if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; if (port->busy) port->busy--; if (!port->busy) /* XXX */ ; return 0; } static unsigned int sync_serial_poll(struct file *file, poll_table *wait) { int dev = iminor(file->f_path.dentry->d_inode); unsigned int mask = 0; sync_port *port; DEBUGPOLL( static unsigned int prev_mask = 0; ); port = &ports[dev]; if (!port->started) { reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); cfg.en = regk_sser_yes; 
rec_cfg.rec_en = port->input; REG_WR(sser, port->regi_sser, rw_cfg, cfg); REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); port->started = 1; } poll_wait(file, &port->out_wait_q, wait); poll_wait(file, &port->in_wait_q, wait); /* No active transfer, descriptors are available */ if (port->output && !port->tr_running) mask |= POLLOUT | POLLWRNORM; /* Descriptor and buffer space available. */ if (port->output && port->active_tr_descr != port->catch_tr_descr && port->out_buf_count < OUT_BUFFER_SIZE) mask |= POLLOUT | POLLWRNORM; /* At least an inbufchunk of data */ if (port->input && sync_data_avail(port) >= port->inbufchunk) mask |= POLLIN | POLLRDNORM; DEBUGPOLL(if (mask != prev_mask) printk("sync_serial_poll: mask 0x%08X %s %s\n", mask, mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":""); prev_mask = mask; ); return mask; } static int sync_serial_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int return_val = 0; int dma_w_size = regk_dma_set_w_size1; int dev = iminor(file->f_path.dentry->d_inode); sync_port *port; reg_sser_rw_tr_cfg tr_cfg; reg_sser_rw_rec_cfg rec_cfg; reg_sser_rw_frm_cfg frm_cfg; reg_sser_rw_cfg gen_cfg; reg_sser_rw_intr_mask intr_mask; if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk("Invalid minor %d\n", dev)); return -1; } port = &ports[dev]; spin_lock_irq(&port->lock); tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg); gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg); intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); switch(cmd) { case SSP_SPEED: if (GET_SPEED(arg) == CODEC) { unsigned int freq; gen_cfg.base_freq = regk_sser_f32; /* Clock divider will internally be * gen_cfg.clk_div + 1. 
*/ freq = GET_FREQ(arg); switch (freq) { case FREQ_32kHz: case FREQ_64kHz: case FREQ_128kHz: case FREQ_256kHz: gen_cfg.clk_div = 125 * (1 << (freq - FREQ_256kHz)) - 1; break; case FREQ_512kHz: gen_cfg.clk_div = 62; break; case FREQ_1MHz: case FREQ_2MHz: case FREQ_4MHz: gen_cfg.clk_div = 8 * (1 << freq) - 1; break; } } else { gen_cfg.base_freq = regk_sser_f29_493; switch (GET_SPEED(arg)) { case SSP150: gen_cfg.clk_div = 29493000 / (150 * 8) - 1; break; case SSP300: gen_cfg.clk_div = 29493000 / (300 * 8) - 1; break; case SSP600: gen_cfg.clk_div = 29493000 / (600 * 8) - 1; break; case SSP1200: gen_cfg.clk_div = 29493000 / (1200 * 8) - 1; break; case SSP2400: gen_cfg.clk_div = 29493000 / (2400 * 8) - 1; break; case SSP4800: gen_cfg.clk_div = 29493000 / (4800 * 8) - 1; break; case SSP9600: gen_cfg.clk_div = 29493000 / (9600 * 8) - 1; break; case SSP19200: gen_cfg.clk_div = 29493000 / (19200 * 8) - 1; break; case SSP28800: gen_cfg.clk_div = 29493000 / (28800 * 8) - 1; break; case SSP57600: gen_cfg.clk_div = 29493000 / (57600 * 8) - 1; break; case SSP115200: gen_cfg.clk_div = 29493000 / (115200 * 8) - 1; break; case SSP230400: gen_cfg.clk_div = 29493000 / (230400 * 8) - 1; break; case SSP460800: gen_cfg.clk_div = 29493000 / (460800 * 8) - 1; break; case SSP921600: gen_cfg.clk_div = 29493000 / (921600 * 8) - 1; break; case SSP3125000: gen_cfg.base_freq = regk_sser_f100; gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1; break; } } frm_cfg.wordrate = GET_WORD_RATE(arg); break; case SSP_MODE: switch(arg) { case MASTER_OUTPUT: port->output = 1; port->input = 0; frm_cfg.out_on = regk_sser_tr; frm_cfg.frame_pin_dir = regk_sser_out; gen_cfg.clk_dir = regk_sser_out; break; case SLAVE_OUTPUT: port->output = 1; port->input = 0; frm_cfg.frame_pin_dir = regk_sser_in; gen_cfg.clk_dir = regk_sser_in; break; case MASTER_INPUT: port->output = 0; port->input = 1; frm_cfg.frame_pin_dir = regk_sser_out; frm_cfg.out_on = regk_sser_intern_tb; gen_cfg.clk_dir = regk_sser_out; break; case 
SLAVE_INPUT: port->output = 0; port->input = 1; frm_cfg.frame_pin_dir = regk_sser_in; gen_cfg.clk_dir = regk_sser_in; break; case MASTER_BIDIR: port->output = 1; port->input = 1; frm_cfg.frame_pin_dir = regk_sser_out; frm_cfg.out_on = regk_sser_intern_tb; gen_cfg.clk_dir = regk_sser_out; break; case SLAVE_BIDIR: port->output = 1; port->input = 1; frm_cfg.frame_pin_dir = regk_sser_in; gen_cfg.clk_dir = regk_sser_in; break; default: spin_unlock_irq(&port->lock); return -EINVAL; } if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT)) intr_mask.rdav = regk_sser_yes; break; case SSP_FRAME_SYNC: if (arg & NORMAL_SYNC) { frm_cfg.rec_delay = 1; frm_cfg.tr_delay = 1; } else if (arg & EARLY_SYNC) frm_cfg.rec_delay = frm_cfg.tr_delay = 0; else if (arg & SECOND_WORD_SYNC) { frm_cfg.rec_delay = 7; frm_cfg.tr_delay = 1; } tr_cfg.bulk_wspace = frm_cfg.tr_delay; frm_cfg.early_wend = regk_sser_yes; if (arg & BIT_SYNC) frm_cfg.type = regk_sser_edge; else if (arg & WORD_SYNC) frm_cfg.type = regk_sser_level; else if (arg & EXTENDED_SYNC) frm_cfg.early_wend = regk_sser_no; if (arg & SYNC_ON) frm_cfg.frame_pin_use = regk_sser_frm; else if (arg & SYNC_OFF) frm_cfg.frame_pin_use = regk_sser_gio0; dma_w_size = regk_dma_set_w_size2; if (arg & WORD_SIZE_8) { rec_cfg.sample_size = tr_cfg.sample_size = 7; dma_w_size = regk_dma_set_w_size1; } else if (arg & WORD_SIZE_12) rec_cfg.sample_size = tr_cfg.sample_size = 11; else if (arg & WORD_SIZE_16) rec_cfg.sample_size = tr_cfg.sample_size = 15; else if (arg & WORD_SIZE_24) rec_cfg.sample_size = tr_cfg.sample_size = 23; else if (arg & WORD_SIZE_32) rec_cfg.sample_size = tr_cfg.sample_size = 31; if (arg & BIT_ORDER_MSB) rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst; else if (arg & BIT_ORDER_LSB) rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst; if (arg & FLOW_CONTROL_ENABLE) { frm_cfg.status_pin_use = regk_sser_frm; rec_cfg.fifo_thr = regk_sser_thr16; } else if (arg & FLOW_CONTROL_DISABLE) { frm_cfg.status_pin_use = 
regk_sser_gio0; rec_cfg.fifo_thr = regk_sser_inf; } if (arg & CLOCK_NOT_GATED) gen_cfg.gate_clk = regk_sser_no; else if (arg & CLOCK_GATED) gen_cfg.gate_clk = regk_sser_yes; break; case SSP_IPOLARITY: /* NOTE!! negedge is considered NORMAL */ if (arg & CLOCK_NORMAL) rec_cfg.clk_pol = regk_sser_neg; else if (arg & CLOCK_INVERT) rec_cfg.clk_pol = regk_sser_pos; if (arg & FRAME_NORMAL) frm_cfg.level = regk_sser_pos_hi; else if (arg & FRAME_INVERT) frm_cfg.level = regk_sser_neg_lo; if (arg & STATUS_NORMAL) gen_cfg.hold_pol = regk_sser_pos; else if (arg & STATUS_INVERT) gen_cfg.hold_pol = regk_sser_neg; break; case SSP_OPOLARITY: if (arg & CLOCK_NORMAL) gen_cfg.out_clk_pol = regk_sser_pos; else if (arg & CLOCK_INVERT) gen_cfg.out_clk_pol = regk_sser_neg; if (arg & FRAME_NORMAL) frm_cfg.level = regk_sser_pos_hi; else if (arg & FRAME_INVERT) frm_cfg.level = regk_sser_neg_lo; if (arg & STATUS_NORMAL) gen_cfg.hold_pol = regk_sser_pos; else if (arg & STATUS_INVERT) gen_cfg.hold_pol = regk_sser_neg; break; case SSP_SPI: rec_cfg.fifo_thr = regk_sser_inf; rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst; rec_cfg.sample_size = tr_cfg.sample_size = 7; frm_cfg.frame_pin_use = regk_sser_frm; frm_cfg.type = regk_sser_level; frm_cfg.tr_delay = 1; frm_cfg.level = regk_sser_neg_lo; if (arg & SPI_SLAVE) { rec_cfg.clk_pol = regk_sser_neg; gen_cfg.clk_dir = regk_sser_in; port->input = 1; port->output = 0; } else { gen_cfg.out_clk_pol = regk_sser_pos; port->input = 0; port->output = 1; gen_cfg.clk_dir = regk_sser_out; } break; case SSP_INBUFCHUNK: break; default: return_val = -1; } if (port->started) { rec_cfg.rec_en = port->input; gen_cfg.en = (port->output | port->input); } REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg); REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | 
WORD_SIZE_12 | WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) { int en = gen_cfg.en; gen_cfg.en = 0; REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); /* ##### Should DMA be stoped before we change dma size? */ DMA_WR_CMD(port->regi_dmain, dma_w_size); DMA_WR_CMD(port->regi_dmaout, dma_w_size); gen_cfg.en = en; REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); } spin_unlock_irq(&port->lock); return return_val; } static long sync_serial_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; mutex_lock(&sync_serial_mutex); ret = sync_serial_ioctl_unlocked(file, cmd, arg); mutex_unlock(&sync_serial_mutex); return ret; } /* NOTE: sync_serial_write does not support concurrency */ static ssize_t sync_serial_write(struct file *file, const char *buf, size_t count, loff_t *ppos) { int dev = iminor(file->f_path.dentry->d_inode); DECLARE_WAITQUEUE(wait, current); struct sync_port *port; int trunc_count; unsigned long flags; int bytes_free; int out_buf_count; unsigned char *rd_ptr; /* First allocated byte in the buffer */ unsigned char *wr_ptr; /* First free byte in the buffer */ unsigned char *buf_stop_ptr; /* Last byte + 1 */ if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; /* |<- OUT_BUFFER_SIZE ->| * |<- out_buf_count ->| * |<- trunc_count ->| ...->| * ______________________________________________________ * | free | data | free | * |_________|___________________|________________________| * ^ rd_ptr ^ wr_ptr */ DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n", port->port_nbr, count, port->active_tr_descr, port->catch_tr_descr)); /* Read variables that may be updated by interrupts */ spin_lock_irqsave(&port->lock, flags); rd_ptr = port->out_rd_ptr; out_buf_count = port->out_buf_count; spin_unlock_irqrestore(&port->lock, flags); /* Check if resources are available */ if (port->tr_running && ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) || 
out_buf_count >= OUT_BUFFER_SIZE)) { DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev)); return -EAGAIN; } buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE; /* Determine pointer to the first free byte, before copying. */ wr_ptr = rd_ptr + out_buf_count; if (wr_ptr >= buf_stop_ptr) wr_ptr -= OUT_BUFFER_SIZE; /* If we wrap the ring buffer, let the user space program handle it by * truncating the data. This could be more elegant, small buffer * fragments may occur. */ bytes_free = OUT_BUFFER_SIZE - out_buf_count; if (wr_ptr + bytes_free > buf_stop_ptr) bytes_free = buf_stop_ptr - wr_ptr; trunc_count = (count < bytes_free) ? count : bytes_free; if (copy_from_user(wr_ptr, buf, trunc_count)) return -EFAULT; DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n", out_buf_count, trunc_count, port->out_buf_count, port->out_buffer, wr_ptr, buf_stop_ptr)); /* Make sure transmitter/receiver is running */ if (!port->started) { reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); cfg.en = regk_sser_yes; rec_cfg.rec_en = port->input; REG_WR(sser, port->regi_sser, rw_cfg, cfg); REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); port->started = 1; } /* Setup wait if blocking */ if (!(file->f_flags & O_NONBLOCK)) { add_wait_queue(&port->out_wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); } spin_lock_irqsave(&port->lock, flags); port->out_buf_count += trunc_count; if (port->use_dma) { start_dma_out(port, wr_ptr, trunc_count); } else if (!port->tr_running) { reg_sser_rw_intr_mask intr_mask; intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); /* Start sender by writing data */ send_word(port); /* and enable transmitter ready IRQ */ intr_mask.trdy = 1; REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); } spin_unlock_irqrestore(&port->lock, flags); /* Exit if non blocking */ if (file->f_flags & O_NONBLOCK) { DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n", port->port_nbr, 
trunc_count, REG_RD_INT(dma, port->regi_dmaout, r_intr))); return trunc_count; } schedule(); set_current_state(TASK_RUNNING); remove_wait_queue(&port->out_wait_q, &wait); if (signal_pending(current)) return -EINTR; DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n", port->port_nbr, trunc_count)); return trunc_count; } static ssize_t sync_serial_read(struct file * file, char * buf, size_t count, loff_t *ppos) { int dev = iminor(file->f_path.dentry->d_inode); int avail; sync_port *port; unsigned char* start; unsigned char* end; unsigned long flags; if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { DEBUG(printk("Invalid minor %d\n", dev)); return -ENODEV; } port = &ports[dev]; DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size)); if (!port->started) { reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); cfg.en = regk_sser_yes; tr_cfg.tr_en = regk_sser_yes; rec_cfg.rec_en = regk_sser_yes; REG_WR(sser, port->regi_sser, rw_cfg, cfg); REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); port->started = 1; } /* Calculate number of available bytes */ /* Save pointers to avoid that they are modified by interrupt */ spin_lock_irqsave(&port->lock, flags); start = (unsigned char*)port->readp; /* cast away volatile */ end = (unsigned char*)port->writep; /* cast away volatile */ spin_unlock_irqrestore(&port->lock, flags); while ((start == end) && !port->full) /* No data */ { DEBUGREAD(printk(KERN_DEBUG "&")); if (file->f_flags & O_NONBLOCK) return -EAGAIN; interruptible_sleep_on(&port->in_wait_q); if (signal_pending(current)) return -EINTR; spin_lock_irqsave(&port->lock, flags); start = (unsigned char*)port->readp; /* cast away volatile */ end = (unsigned char*)port->writep; /* cast away volatile 
*/ spin_unlock_irqrestore(&port->lock, flags); } /* Lazy read, never return wrapped data. */ if (port->full) avail = port->in_buffer_size; else if (end > start) avail = end - start; else avail = port->flip + port->in_buffer_size - start; count = count > avail ? avail : count; if (copy_to_user(buf, start, count)) return -EFAULT; /* Disable interrupts while updating readp */ spin_lock_irqsave(&port->lock, flags); port->readp += count; if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */ port->readp = port->flip; port->full = 0; spin_unlock_irqrestore(&port->lock, flags); DEBUGREAD(printk("r %d\n", count)); return count; } static void send_word(sync_port* port) { reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); reg_sser_rw_tr_data tr_data = {0}; switch(tr_cfg.sample_size) { case 8: port->out_buf_count--; tr_data.data = *port->out_rd_ptr++; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; break; case 12: { int data = (*port->out_rd_ptr++) << 8; data |= *port->out_rd_ptr++; port->out_buf_count -= 2; tr_data.data = data; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; } break; case 16: port->out_buf_count -= 2; tr_data.data = *(unsigned short *)port->out_rd_ptr; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); port->out_rd_ptr += 2; if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; break; case 24: port->out_buf_count -= 3; tr_data.data = *(unsigned short *)port->out_rd_ptr; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); port->out_rd_ptr += 2; tr_data.data = *port->out_rd_ptr++; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; break; case 32: port->out_buf_count -= 4; tr_data.data = 
*(unsigned short *)port->out_rd_ptr; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); port->out_rd_ptr += 2; tr_data.data = *(unsigned short *)port->out_rd_ptr; REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); port->out_rd_ptr += 2; if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; break; } } static void start_dma_out(struct sync_port *port, const char *data, int count) { port->active_tr_descr->buf = (char *) virt_to_phys((char *) data); port->active_tr_descr->after = port->active_tr_descr->buf + count; port->active_tr_descr->intr = 1; port->active_tr_descr->eol = 1; port->prev_tr_descr->eol = 0; DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n", port->prev_tr_descr, port->active_tr_descr)); port->prev_tr_descr = port->active_tr_descr; port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next); if (!port->tr_running) { reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); port->out_context.next = 0; port->out_context.saved_data = (dma_descr_data *)virt_to_phys(port->prev_tr_descr); port->out_context.saved_data_buf = port->prev_tr_descr->buf; DMA_START_CONTEXT(port->regi_dmaout, virt_to_phys((char *)&port->out_context)); tr_cfg.tr_en = regk_sser_yes; REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); DEBUGTRDMA(printk(KERN_DEBUG "dma s\n");); } else { DMA_CONTINUE_DATA(port->regi_dmaout); DEBUGTRDMA(printk(KERN_DEBUG "dma c\n");); } port->tr_running = 1; } static void start_dma_in(sync_port *port) { int i; char *buf; port->writep = port->flip; if (port->writep > port->flip + port->in_buffer_size) { panic("Offset too large in sync serial driver\n"); return; } buf = (char*)virt_to_phys(port->in_buffer); for (i = 0; i < NBR_IN_DESCR; i++) { port->in_descr[i].buf = buf; port->in_descr[i].after = buf + port->inbufchunk; port->in_descr[i].intr = 1; port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]); port->in_descr[i].buf = buf; buf += port->inbufchunk; } /* 
Link the last descriptor to the first */ port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); port->in_descr[i-1].eol = regk_sser_yes; port->next_rx_desc = &port->in_descr[0]; port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1]; port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); port->in_context.saved_data_buf = port->in_descr[0].buf; DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context)); } #ifdef SYNC_SER_DMA static irqreturn_t tr_interrupt(int irq, void *dev_id) { reg_dma_r_masked_intr masked; reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; reg_dma_rw_stat stat; int i; int found = 0; int stop_sser = 0; for (i = 0; i < NBR_PORTS; i++) { sync_port *port = &ports[i]; if (!port->enabled || !port->use_dma) continue; /* IRQ active for the port? */ masked = REG_RD(dma, port->regi_dmaout, r_masked_intr); if (!masked.data) continue; found = 1; /* Check if we should stop the DMA transfer */ stat = REG_RD(dma, port->regi_dmaout, rw_stat); if (stat.list_state == regk_dma_data_at_eol) stop_sser = 1; /* Clear IRQ */ REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr); if (!stop_sser) { /* The DMA has completed a descriptor, EOL was not * encountered, so step relevant descriptor and * datapointers forward. */ int sent; sent = port->catch_tr_descr->after - port->catch_tr_descr->buf; DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t" "in descr %p (ac: %p)\n", port->out_buf_count, sent, port->out_buf_count - sent, port->catch_tr_descr, port->active_tr_descr);); port->out_buf_count -= sent; port->catch_tr_descr = phys_to_virt((int) port->catch_tr_descr->next); port->out_rd_ptr = phys_to_virt((int) port->catch_tr_descr->buf); } else { int i, sent; /* EOL handler. * Note that if an EOL was encountered during the irq * locked section of sync_ser_write the DMA will be * restarted and the eol flag will be cleared. * The remaining descriptors will be traversed by * the descriptor interrupts as usual. 
*/ i = 0; while (!port->catch_tr_descr->eol) { sent = port->catch_tr_descr->after - port->catch_tr_descr->buf; DEBUGOUTBUF(printk(KERN_DEBUG "traversing descr %p -%d (%d)\n", port->catch_tr_descr, sent, port->out_buf_count)); port->out_buf_count -= sent; port->catch_tr_descr = phys_to_virt( (int)port->catch_tr_descr->next); i++; if (i >= NBR_OUT_DESCR) { /* TODO: Reset and recover */ panic("sync_serial: missing eol"); } } sent = port->catch_tr_descr->after - port->catch_tr_descr->buf; DEBUGOUTBUF(printk(KERN_DEBUG "eol at descr %p -%d (%d)\n", port->catch_tr_descr, sent, port->out_buf_count)); port->out_buf_count -= sent; /* Update read pointer to first free byte, we * may already be writing data there. */ port->out_rd_ptr = phys_to_virt((int) port->catch_tr_descr->after); if (port->out_rd_ptr > port->out_buffer + OUT_BUFFER_SIZE) port->out_rd_ptr = port->out_buffer; reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); DEBUGTXINT(printk(KERN_DEBUG "tr_int DMA stop %d, set catch @ %p\n", port->out_buf_count, port->active_tr_descr)); if (port->out_buf_count != 0) printk(KERN_CRIT "sync_ser: buffer not " "empty after eol.\n"); port->catch_tr_descr = port->active_tr_descr; port->tr_running = 0; tr_cfg.tr_en = regk_sser_no; REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); } /* wake up the waiting process */ wake_up_interruptible(&port->out_wait_q); } return IRQ_RETVAL(found); } /* tr_interrupt */ static irqreturn_t rx_interrupt(int irq, void *dev_id) { reg_dma_r_masked_intr masked; reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; int i; int found = 0; for (i = 0; i < NBR_PORTS; i++) { sync_port *port = &ports[i]; if (!port->enabled || !port->use_dma ) continue; masked = REG_RD(dma, port->regi_dmain, r_masked_intr); if (masked.data) /* Descriptor interrupt */ { found = 1; while (REG_RD(dma, port->regi_dmain, rw_data) != virt_to_phys(port->next_rx_desc)) { DEBUGRXINT(printk(KERN_DEBUG "!")); if (port->writep + port->inbufchunk > port->flip + 
port->in_buffer_size) { int first_size = port->flip + port->in_buffer_size - port->writep; memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size); memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size); port->writep = port->flip + port->inbufchunk - first_size; } else { memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), port->inbufchunk); port->writep += port->inbufchunk; if (port->writep >= port->flip + port->in_buffer_size) port->writep = port->flip; } if (port->writep == port->readp) { port->full = 1; } port->next_rx_desc->eol = 1; port->prev_rx_desc->eol = 0; /* Cache bug workaround */ flush_dma_descr(port->prev_rx_desc, 0); port->prev_rx_desc = port->next_rx_desc; port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next); /* Cache bug workaround */ flush_dma_descr(port->prev_rx_desc, 1); /* wake up the waiting process */ wake_up_interruptible(&port->in_wait_q); DMA_CONTINUE(port->regi_dmain); REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr); } } } return IRQ_RETVAL(found); } /* rx_interrupt */ #endif /* SYNC_SER_DMA */ #ifdef SYNC_SER_MANUAL static irqreturn_t manual_interrupt(int irq, void *dev_id) { int i; int found = 0; reg_sser_r_masked_intr masked; for (i = 0; i < NBR_PORTS; i++) { sync_port *port = &ports[i]; if (!port->enabled || port->use_dma) { continue; } masked = REG_RD(sser, port->regi_sser, r_masked_intr); if (masked.rdav) /* Data received? 
*/ { reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data); found = 1; /* Read data */ switch(rec_cfg.sample_size) { case 8: *port->writep++ = data.data & 0xff; break; case 12: *port->writep = (data.data & 0x0ff0) >> 4; *(port->writep + 1) = data.data & 0x0f; port->writep+=2; break; case 16: *(unsigned short*)port->writep = data.data; port->writep+=2; break; case 24: *(unsigned int*)port->writep = data.data; port->writep+=3; break; case 32: *(unsigned int*)port->writep = data.data; port->writep+=4; break; } if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */ port->writep = port->flip; if (port->writep == port->readp) { /* receive buffer overrun, discard oldest data */ port->readp++; if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */ port->readp = port->flip; } if (sync_data_avail(port) >= port->inbufchunk) wake_up_interruptible(&port->in_wait_q); /* Wake up application */ } if (masked.trdy) /* Transmitter ready? */ { found = 1; if (port->out_buf_count > 0) /* More data to send */ send_word(port); else /* transmission finished */ { reg_sser_rw_intr_mask intr_mask; intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); intr_mask.trdy = 0; REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); wake_up_interruptible(&port->out_wait_q); /* Wake up application */ } } } return IRQ_RETVAL(found); } #endif module_init(etrax_sync_serial_init);
gpl-2.0
hephaex/kernel
drivers/usb/gadget/s3c-hsudc.c
161
36654
/* linux/drivers/usb/gadget/s3c-hsudc.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S3C24XX USB 2.0 High-speed USB controller gadget driver * * The S3C24XX USB 2.0 high-speed USB controller supports upto 9 endpoints. * Each endpoint can be configured as either in or out endpoint. Endpoints * can be configured for Bulk or Interrupt transfer mode. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/otg.h> #include <linux/prefetch.h> #include <linux/platform_data/s3c-hsudc.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> #include <mach/regs-s3c2443-clock.h> #define S3C_HSUDC_REG(x) (x) /* Non-Indexed Registers */ #define S3C_IR S3C_HSUDC_REG(0x00) /* Index Register */ #define S3C_EIR S3C_HSUDC_REG(0x04) /* EP Intr Status */ #define S3C_EIR_EP0 (1<<0) #define S3C_EIER S3C_HSUDC_REG(0x08) /* EP Intr Enable */ #define S3C_FAR S3C_HSUDC_REG(0x0c) /* Gadget Address */ #define S3C_FNR S3C_HSUDC_REG(0x10) /* Frame Number */ #define S3C_EDR S3C_HSUDC_REG(0x14) /* EP Direction */ #define S3C_TR S3C_HSUDC_REG(0x18) /* Test Register */ #define S3C_SSR S3C_HSUDC_REG(0x1c) /* System Status */ #define S3C_SSR_DTZIEN_EN (0xff8f) #define S3C_SSR_ERR (0xff80) #define S3C_SSR_VBUSON (1 << 8) #define S3C_SSR_HSP (1 << 4) #define S3C_SSR_SDE (1 << 3) #define S3C_SSR_RESUME (1 << 2) #define S3C_SSR_SUSPEND (1 << 1) #define S3C_SSR_RESET (1 << 0) #define S3C_SCR S3C_HSUDC_REG(0x20) /* System Control */ #define S3C_SCR_DTZIEN_EN (1 << 
14) #define S3C_SCR_RRD_EN (1 << 5) #define S3C_SCR_SUS_EN (1 << 1) #define S3C_SCR_RST_EN (1 << 0) #define S3C_EP0SR S3C_HSUDC_REG(0x24) /* EP0 Status */ #define S3C_EP0SR_EP0_LWO (1 << 6) #define S3C_EP0SR_STALL (1 << 4) #define S3C_EP0SR_TX_SUCCESS (1 << 1) #define S3C_EP0SR_RX_SUCCESS (1 << 0) #define S3C_EP0CR S3C_HSUDC_REG(0x28) /* EP0 Control */ #define S3C_BR(_x) S3C_HSUDC_REG(0x60 + (_x * 4)) /* Indexed Registers */ #define S3C_ESR S3C_HSUDC_REG(0x2c) /* EPn Status */ #define S3C_ESR_FLUSH (1 << 6) #define S3C_ESR_STALL (1 << 5) #define S3C_ESR_LWO (1 << 4) #define S3C_ESR_PSIF_ONE (1 << 2) #define S3C_ESR_PSIF_TWO (2 << 2) #define S3C_ESR_TX_SUCCESS (1 << 1) #define S3C_ESR_RX_SUCCESS (1 << 0) #define S3C_ECR S3C_HSUDC_REG(0x30) /* EPn Control */ #define S3C_ECR_DUEN (1 << 7) #define S3C_ECR_FLUSH (1 << 6) #define S3C_ECR_STALL (1 << 1) #define S3C_ECR_IEMS (1 << 0) #define S3C_BRCR S3C_HSUDC_REG(0x34) /* Read Count */ #define S3C_BWCR S3C_HSUDC_REG(0x38) /* Write Count */ #define S3C_MPR S3C_HSUDC_REG(0x3c) /* Max Pkt Size */ #define WAIT_FOR_SETUP (0) #define DATA_STATE_XMIT (1) #define DATA_STATE_RECV (2) static const char * const s3c_hsudc_supply_names[] = { "vdda", /* analog phy supply, 3.3V */ "vddi", /* digital phy supply, 1.2V */ "vddosc", /* oscillator supply, 1.8V - 3.3V */ }; /** * struct s3c_hsudc_ep - Endpoint representation used by driver. * @ep: USB gadget layer representation of device endpoint. * @name: Endpoint name (as required by ep autoconfiguration). * @dev: Reference to the device controller to which this EP belongs. * @desc: Endpoint descriptor obtained from the gadget driver. * @queue: Transfer request queue for the endpoint. * @stopped: Maintains state of endpoint, set if EP is halted. * @bEndpointAddress: EP address (including direction bit). * @fifo: Base address of EP FIFO. 
*/ struct s3c_hsudc_ep { struct usb_ep ep; char name[20]; struct s3c_hsudc *dev; struct list_head queue; u8 stopped; u8 wedge; u8 bEndpointAddress; void __iomem *fifo; }; /** * struct s3c_hsudc_req - Driver encapsulation of USB gadget transfer request. * @req: Reference to USB gadget transfer request. * @queue: Used for inserting this request to the endpoint request queue. */ struct s3c_hsudc_req { struct usb_request req; struct list_head queue; }; /** * struct s3c_hsudc - Driver's abstraction of the device controller. * @gadget: Instance of usb_gadget which is referenced by gadget driver. * @driver: Reference to currenty active gadget driver. * @dev: The device reference used by probe function. * @lock: Lock to synchronize the usage of Endpoints (EP's are indexed). * @regs: Remapped base address of controller's register space. * irq: IRQ number used by the controller. * uclk: Reference to the controller clock. * ep0state: Current state of EP0. * ep: List of endpoints supported by the controller. 
*/ struct s3c_hsudc { struct usb_gadget gadget; struct usb_gadget_driver *driver; struct device *dev; struct s3c24xx_hsudc_platdata *pd; struct usb_phy *transceiver; struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsudc_supply_names)]; spinlock_t lock; void __iomem *regs; int irq; struct clk *uclk; int ep0state; struct s3c_hsudc_ep ep[]; }; #define ep_maxpacket(_ep) ((_ep)->ep.maxpacket) #define ep_is_in(_ep) ((_ep)->bEndpointAddress & USB_DIR_IN) #define ep_index(_ep) ((_ep)->bEndpointAddress & \ USB_ENDPOINT_NUMBER_MASK) static const char driver_name[] = "s3c-udc"; static const char ep0name[] = "ep0-control"; static inline struct s3c_hsudc_req *our_req(struct usb_request *req) { return container_of(req, struct s3c_hsudc_req, req); } static inline struct s3c_hsudc_ep *our_ep(struct usb_ep *ep) { return container_of(ep, struct s3c_hsudc_ep, ep); } static inline struct s3c_hsudc *to_hsudc(struct usb_gadget *gadget) { return container_of(gadget, struct s3c_hsudc, gadget); } static inline void set_index(struct s3c_hsudc *hsudc, int ep_addr) { ep_addr &= USB_ENDPOINT_NUMBER_MASK; writel(ep_addr, hsudc->regs + S3C_IR); } static inline void __orr32(void __iomem *ptr, u32 val) { writel(readl(ptr) | val, ptr); } static void s3c_hsudc_init_phy(void) { u32 cfg; cfg = readl(S3C2443_PWRCFG) | S3C2443_PWRCFG_USBPHY; writel(cfg, S3C2443_PWRCFG); cfg = readl(S3C2443_URSTCON); cfg |= (S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST); writel(cfg, S3C2443_URSTCON); mdelay(1); cfg = readl(S3C2443_URSTCON); cfg &= ~(S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST); writel(cfg, S3C2443_URSTCON); cfg = readl(S3C2443_PHYCTRL); cfg &= ~(S3C2443_PHYCTRL_CLKSEL | S3C2443_PHYCTRL_DSPORT); cfg |= (S3C2443_PHYCTRL_EXTCLK | S3C2443_PHYCTRL_PLLSEL); writel(cfg, S3C2443_PHYCTRL); cfg = readl(S3C2443_PHYPWR); cfg &= ~(S3C2443_PHYPWR_FSUSPEND | S3C2443_PHYPWR_PLL_PWRDN | S3C2443_PHYPWR_XO_ON | S3C2443_PHYPWR_PLL_REFCLK | S3C2443_PHYPWR_ANALOG_PD); cfg |= S3C2443_PHYPWR_COMMON_ON; writel(cfg, 
S3C2443_PHYPWR); cfg = readl(S3C2443_UCLKCON); cfg |= (S3C2443_UCLKCON_DETECT_VBUS | S3C2443_UCLKCON_FUNC_CLKEN | S3C2443_UCLKCON_TCLKEN); writel(cfg, S3C2443_UCLKCON); } static void s3c_hsudc_uninit_phy(void) { u32 cfg; cfg = readl(S3C2443_PWRCFG) & ~S3C2443_PWRCFG_USBPHY; writel(cfg, S3C2443_PWRCFG); writel(S3C2443_PHYPWR_FSUSPEND, S3C2443_PHYPWR); cfg = readl(S3C2443_UCLKCON) & ~S3C2443_UCLKCON_FUNC_CLKEN; writel(cfg, S3C2443_UCLKCON); } /** * s3c_hsudc_complete_request - Complete a transfer request. * @hsep: Endpoint to which the request belongs. * @hsreq: Transfer request to be completed. * @status: Transfer completion status for the transfer request. */ static void s3c_hsudc_complete_request(struct s3c_hsudc_ep *hsep, struct s3c_hsudc_req *hsreq, int status) { unsigned int stopped = hsep->stopped; struct s3c_hsudc *hsudc = hsep->dev; list_del_init(&hsreq->queue); hsreq->req.status = status; if (!ep_index(hsep)) { hsudc->ep0state = WAIT_FOR_SETUP; hsep->bEndpointAddress &= ~USB_DIR_IN; } hsep->stopped = 1; spin_unlock(&hsudc->lock); if (hsreq->req.complete != NULL) hsreq->req.complete(&hsep->ep, &hsreq->req); spin_lock(&hsudc->lock); hsep->stopped = stopped; } /** * s3c_hsudc_nuke_ep - Terminate all requests queued for a endpoint. * @hsep: Endpoint for which queued requests have to be terminated. * @status: Transfer completion status for the transfer request. */ static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status) { struct s3c_hsudc_req *hsreq; while (!list_empty(&hsep->queue)) { hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); s3c_hsudc_complete_request(hsep, hsreq, status); } } /** * s3c_hsudc_stop_activity - Stop activity on all endpoints. * @hsudc: Device controller for which EP activity is to be stopped. * * All the endpoints are stopped and any pending transfer requests if any on * the endpoint are terminated. 
*/
static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc)
{
	struct s3c_hsudc_ep *hsep;
	int epnum;

	hsudc->gadget.speed = USB_SPEED_UNKNOWN;

	for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) {
		hsep = &hsudc->ep[epnum];
		hsep->stopped = 1;
		s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
	}
}

/**
 * s3c_hsudc_read_setup_pkt - Read the received setup packet from EP0 fifo.
 * @hsudc: Device controller from which setup packet is to be read.
 * @buf: The buffer into which the setup packet is read.
 *
 * The setup packet received in the EP0 fifo is read and stored into a
 * given buffer address.  Caller must ensure @buf can hold BRCR words
 * (a setup packet is 4 u16 words).
 */
static void s3c_hsudc_read_setup_pkt(struct s3c_hsudc *hsudc, u16 *buf)
{
	int count;

	/* BRCR holds the number of 16-bit words pending in the fifo. */
	count = readl(hsudc->regs + S3C_BRCR);
	while (count--)
		*buf++ = (u16)readl(hsudc->regs + S3C_BR(0));

	/* Ack the RX so the controller can accept the next packet. */
	writel(S3C_EP0SR_RX_SUCCESS, hsudc->regs + S3C_EP0SR);
}

/**
 * s3c_hsudc_write_fifo - Write next chunk of transfer data to EP fifo.
 * @hsep: Endpoint to which the data is to be written.
 * @hsreq: Transfer request from which the next chunk of data is written.
 *
 * Write the next chunk of data from a transfer request to the endpoint FIFO.
 * If the transfer request completes, 1 is returned, otherwise 0 is returned.
 */
static int s3c_hsudc_write_fifo(struct s3c_hsudc_ep *hsep,
				struct s3c_hsudc_req *hsreq)
{
	u16 *buf;
	u32 max = ep_maxpacket(hsep);
	u32 count, length;
	bool is_last;
	void __iomem *fifo = hsep->fifo;

	buf = hsreq->req.buf + hsreq->req.actual;
	prefetch(buf);

	/* At most one max-packet-sized chunk per call. */
	length = hsreq->req.length - hsreq->req.actual;
	length = min(length, max);
	hsreq->req.actual += length;

	/* Program byte count, then feed the fifo 16 bits at a time. */
	writel(length, hsep->dev->regs + S3C_BWCR);
	for (count = 0; count < length; count += 2)
		writel(*buf++, fifo);

	/*
	 * A short packet terminates the transfer.  A full packet ends it
	 * only when all bytes are sent and no explicit zero-length packet
	 * was requested (req.zero).
	 */
	if (count != max) {
		is_last = true;
	} else {
		if (hsreq->req.length != hsreq->req.actual || hsreq->req.zero)
			is_last = false;
		else
			is_last = true;
	}

	if (is_last) {
		s3c_hsudc_complete_request(hsep, hsreq, 0);
		return 1;
	}

	return 0;
}

/**
 * s3c_hsudc_read_fifo - Read the next chunk of data from EP fifo.
 * @hsep: Endpoint from which the data is to be read.
 * @hsreq: Transfer request to which the next chunk of data read is written.
 *
 * Read the next chunk of data from the endpoint FIFO and a write it to the
 * transfer request buffer. If the transfer request completes, 1 is returned,
 * otherwise 0 is returned.
 */
static int s3c_hsudc_read_fifo(struct s3c_hsudc_ep *hsep,
				struct s3c_hsudc_req *hsreq)
{
	struct s3c_hsudc *hsudc = hsep->dev;
	u32 csr, offset;
	u16 *buf, word;
	u32 buflen, rcnt, rlen;
	void __iomem *fifo = hsep->fifo;
	u32 is_short = 0;

	/* EP0 and data endpoints use different status registers. */
	offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
	csr = readl(hsudc->regs + offset);
	if (!(csr & S3C_ESR_RX_SUCCESS))
		return -EINVAL;

	buf = hsreq->req.buf + hsreq->req.actual;
	prefetchw(buf);
	buflen = hsreq->req.length - hsreq->req.actual;

	/* BRCR counts u16 words; LWO flags an odd trailing byte. */
	rcnt = readl(hsudc->regs + S3C_BRCR);
	rlen = (csr & S3C_ESR_LWO) ? (rcnt * 2 - 1) : (rcnt * 2);

	hsreq->req.actual += min(rlen, buflen);
	is_short = (rlen < hsep->ep.maxpacket);

	/* Drain the fifo even if the request buffer overflows. */
	while (rcnt-- != 0) {
		word = (u16)readl(fifo);
		if (buflen) {
			*buf++ = word;
			buflen--;
		} else {
			hsreq->req.status = -EOVERFLOW;
		}
	}

	writel(S3C_ESR_RX_SUCCESS, hsudc->regs + offset);

	/* A short packet or a full buffer ends the transfer. */
	if (is_short || hsreq->req.actual == hsreq->req.length) {
		s3c_hsudc_complete_request(hsep, hsreq, 0);
		return 1;
	}

	return 0;
}

/**
 * s3c_hsudc_epin_intr - Handle in-endpoint interrupt.
 * @hsudc - Device controller for which the interrupt is to be handled.
 * @ep_idx - Endpoint number on which an interrupt is pending.
 *
 * Handles interrupt for a in-endpoint. The interrupts that are handled are
 * stall and data transmit complete interrupt.
 */
static void s3c_hsudc_epin_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
{
	struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
	struct s3c_hsudc_req *hsreq;
	u32 csr;

	csr = readl(hsudc->regs + S3C_ESR);
	if (csr & S3C_ESR_STALL) {
		writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
		return;
	}

	if (csr & S3C_ESR_TX_SUCCESS) {
		writel(S3C_ESR_TX_SUCCESS, hsudc->regs + S3C_ESR);
		if (list_empty(&hsep->queue))
			return;

		hsreq = list_entry(hsep->queue.next,
				struct s3c_hsudc_req, queue);
		/* Dual-buffered fifo: fill the second packet slot too. */
		if ((s3c_hsudc_write_fifo(hsep, hsreq) == 0) &&
				(csr & S3C_ESR_PSIF_TWO))
			s3c_hsudc_write_fifo(hsep, hsreq);
	}
}

/**
 * s3c_hsudc_epout_intr - Handle out-endpoint interrupt.
 * @hsudc - Device controller for which the interrupt is to be handled.
 * @ep_idx - Endpoint number on which an interrupt is pending.
 *
 * Handles interrupt for a out-endpoint. The interrupts that are handled are
 * stall, flush and data ready interrupt.
 */
static void s3c_hsudc_epout_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
{
	struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
	struct s3c_hsudc_req *hsreq;
	u32 csr;

	csr = readl(hsudc->regs + S3C_ESR);
	if (csr & S3C_ESR_STALL) {
		writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
		return;
	}

	if (csr & S3C_ESR_FLUSH) {
		__orr32(hsudc->regs + S3C_ECR, S3C_ECR_FLUSH);
		return;
	}

	if (csr & S3C_ESR_RX_SUCCESS) {
		if (list_empty(&hsep->queue))
			return;

		hsreq = list_entry(hsep->queue.next,
				struct s3c_hsudc_req, queue);
		/* Dual-buffered fifo: drain the second packet slot too. */
		if (((s3c_hsudc_read_fifo(hsep, hsreq)) == 0) &&
				(csr & S3C_ESR_PSIF_TWO))
			s3c_hsudc_read_fifo(hsep, hsreq);
	}
}

/** s3c_hsudc_set_halt - Set or clear a endpoint halt.
 * @_ep: Endpoint on which halt has to be set or cleared.
 * @value: 1 for setting halt on endpoint, 0 to clear halt.
 *
 * Set or clear endpoint halt. If halt is set, the endpoint is stopped.
 * If halt is cleared, for in-endpoints, if there are any pending
 * transfer requests, transfers are started.
*/ static int s3c_hsudc_set_halt(struct usb_ep *_ep, int value) { struct s3c_hsudc_ep *hsep = our_ep(_ep); struct s3c_hsudc *hsudc = hsep->dev; struct s3c_hsudc_req *hsreq; unsigned long irqflags; u32 ecr; u32 offset; if (value && ep_is_in(hsep) && !list_empty(&hsep->queue)) return -EAGAIN; spin_lock_irqsave(&hsudc->lock, irqflags); set_index(hsudc, ep_index(hsep)); offset = (ep_index(hsep)) ? S3C_ECR : S3C_EP0CR; ecr = readl(hsudc->regs + offset); if (value) { ecr |= S3C_ECR_STALL; if (ep_index(hsep)) ecr |= S3C_ECR_FLUSH; hsep->stopped = 1; } else { ecr &= ~S3C_ECR_STALL; hsep->stopped = hsep->wedge = 0; } writel(ecr, hsudc->regs + offset); if (ep_is_in(hsep) && !list_empty(&hsep->queue) && !value) { hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); if (hsreq) s3c_hsudc_write_fifo(hsep, hsreq); } spin_unlock_irqrestore(&hsudc->lock, irqflags); return 0; } /** s3c_hsudc_set_wedge - Sets the halt feature with the clear requests ignored * @_ep: Endpoint on which wedge has to be set. * * Sets the halt feature with the clear requests ignored. */ static int s3c_hsudc_set_wedge(struct usb_ep *_ep) { struct s3c_hsudc_ep *hsep = our_ep(_ep); if (!hsep) return -EINVAL; hsep->wedge = 1; return usb_ep_set_halt(_ep); } /** s3c_hsudc_handle_reqfeat - Handle set feature or clear feature requests. * @_ep: Device controller on which the set/clear feature needs to be handled. * @ctrl: Control request as received on the endpoint 0. * * Handle set feature or clear feature control requests on the control endpoint. 
*/ static int s3c_hsudc_handle_reqfeat(struct s3c_hsudc *hsudc, struct usb_ctrlrequest *ctrl) { struct s3c_hsudc_ep *hsep; bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE); u8 ep_num = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK; if (ctrl->bRequestType == USB_RECIP_ENDPOINT) { hsep = &hsudc->ep[ep_num]; switch (le16_to_cpu(ctrl->wValue)) { case USB_ENDPOINT_HALT: if (set || (!set && !hsep->wedge)) s3c_hsudc_set_halt(&hsep->ep, set); return 0; } } return -ENOENT; } /** * s3c_hsudc_process_req_status - Handle get status control request. * @hsudc: Device controller on which get status request has be handled. * @ctrl: Control request as received on the endpoint 0. * * Handle get status control request received on control endpoint. */ static void s3c_hsudc_process_req_status(struct s3c_hsudc *hsudc, struct usb_ctrlrequest *ctrl) { struct s3c_hsudc_ep *hsep0 = &hsudc->ep[0]; struct s3c_hsudc_req hsreq; struct s3c_hsudc_ep *hsep; __le16 reply; u8 epnum; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: reply = cpu_to_le16(0); break; case USB_RECIP_INTERFACE: reply = cpu_to_le16(0); break; case USB_RECIP_ENDPOINT: epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK; hsep = &hsudc->ep[epnum]; reply = cpu_to_le16(hsep->stopped ? 1 : 0); break; } INIT_LIST_HEAD(&hsreq.queue); hsreq.req.length = 2; hsreq.req.buf = &reply; hsreq.req.actual = 0; hsreq.req.complete = NULL; s3c_hsudc_write_fifo(hsep0, &hsreq); } /** * s3c_hsudc_process_setup - Process control request received on endpoint 0. * @hsudc: Device controller on which control request has been received. * * Read the control request received on endpoint 0, decode it and handle * the request. 
*/
static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc)
{
	struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
	struct usb_ctrlrequest ctrl = {0};
	int ret;

	/* Any transfer in flight on EP0 is aborted by a new SETUP. */
	s3c_hsudc_nuke_ep(hsep, -EPROTO);
	s3c_hsudc_read_setup_pkt(hsudc, (u16 *)&ctrl);

	/* EP0 direction for the coming data stage follows bRequestType. */
	if (ctrl.bRequestType & USB_DIR_IN) {
		hsep->bEndpointAddress |= USB_DIR_IN;
		hsudc->ep0state = DATA_STATE_XMIT;
	} else {
		hsep->bEndpointAddress &= ~USB_DIR_IN;
		hsudc->ep0state = DATA_STATE_RECV;
	}

	/* Standard requests the controller/driver answers itself. */
	switch (ctrl.bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (ctrl.bRequestType != (USB_TYPE_STANDARD
				| USB_RECIP_DEVICE))
			break;
		/* Address is latched by hardware; nothing more to do. */
		hsudc->ep0state = WAIT_FOR_SETUP;
		return;

	case USB_REQ_GET_STATUS:
		if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
			break;
		s3c_hsudc_process_req_status(hsudc, &ctrl);
		return;

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
			break;
		s3c_hsudc_handle_reqfeat(hsudc, &ctrl);
		hsudc->ep0state = WAIT_FOR_SETUP;
		return;
	}

	/* Everything else is delegated to the bound gadget driver. */
	if (hsudc->driver) {
		/* setup() may sleep/requeue; call it unlocked. */
		spin_unlock(&hsudc->lock);
		ret = hsudc->driver->setup(&hsudc->gadget, &ctrl);
		spin_lock(&hsudc->lock);

		if (ctrl.bRequest == USB_REQ_SET_CONFIGURATION) {
			hsep->bEndpointAddress &= ~USB_DIR_IN;
			hsudc->ep0state = WAIT_FOR_SETUP;
		}

		/* Gadget driver rejected the request: stall EP0. */
		if (ret < 0) {
			dev_err(hsudc->dev, "setup failed, returned %d\n",
						ret);
			s3c_hsudc_set_halt(&hsep->ep, 1);
			hsudc->ep0state = WAIT_FOR_SETUP;
			hsep->bEndpointAddress &= ~USB_DIR_IN;
		}
	}
}

/** s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt.
 * @hsudc: Device controller on which endpoint 0 interrupt has occured.
 *
 * Handle endpoint 0 interrupt when it occurs. EP0 interrupt could occur
 * when a stall handshake is sent to host or data is sent/received on
 * endpoint 0.
 */
static void s3c_hsudc_handle_ep0_intr(struct s3c_hsudc *hsudc)
{
	struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
	struct s3c_hsudc_req *hsreq;
	u32 csr = readl(hsudc->regs + S3C_EP0SR);
	u32 ecr;

	/* Stall handshake sent: clear it and reset the state machine. */
	if (csr & S3C_EP0SR_STALL) {
		ecr = readl(hsudc->regs + S3C_EP0CR);
		ecr &= ~(S3C_ECR_STALL | S3C_ECR_FLUSH);
		writel(ecr, hsudc->regs + S3C_EP0CR);

		writel(S3C_EP0SR_STALL, hsudc->regs + S3C_EP0SR);
		hsep->stopped = 0;

		s3c_hsudc_nuke_ep(hsep, -ECONNABORTED);
		hsudc->ep0state = WAIT_FOR_SETUP;
		hsep->bEndpointAddress &= ~USB_DIR_IN;
		return;
	}

	/* IN data stage chunk transmitted: push the next chunk. */
	if (csr & S3C_EP0SR_TX_SUCCESS) {
		writel(S3C_EP0SR_TX_SUCCESS, hsudc->regs + S3C_EP0SR);
		if (ep_is_in(hsep)) {
			if (list_empty(&hsep->queue))
				return;

			hsreq = list_entry(hsep->queue.next,
					struct s3c_hsudc_req, queue);
			s3c_hsudc_write_fifo(hsep, hsreq);
		}
	}

	/* RX: either a new SETUP packet or an OUT data stage chunk. */
	if (csr & S3C_EP0SR_RX_SUCCESS) {
		if (hsudc->ep0state == WAIT_FOR_SETUP)
			s3c_hsudc_process_setup(hsudc);
		else {
			if (!ep_is_in(hsep)) {
				if (list_empty(&hsep->queue))
					return;
				hsreq = list_entry(hsep->queue.next,
					struct s3c_hsudc_req, queue);
				s3c_hsudc_read_fifo(hsep, hsreq);
			}
		}
	}
}

/**
 * s3c_hsudc_ep_enable - Enable a endpoint.
 * @_ep: The endpoint to be enabled.
 * @desc: Endpoint descriptor.
 *
 * Enables a endpoint when called from the gadget driver. Endpoint stall if
 * any is cleared, transfer type is configured and endpoint interrupt is
 * enabled.
 */
static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
				const struct usb_endpoint_descriptor *desc)
{
	struct s3c_hsudc_ep *hsep;
	struct s3c_hsudc *hsudc;
	unsigned long flags;
	u32 ecr = 0;

	hsep = our_ep(_ep);
	if (!_ep || !desc || _ep->name == ep0name
		|| desc->bDescriptorType != USB_DT_ENDPOINT
		|| hsep->bEndpointAddress != desc->bEndpointAddress
		|| ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
		return -EINVAL;

	/* Bulk endpoints must use exactly the hardware packet size. */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
		&& usb_endpoint_maxp(desc) != ep_maxpacket(hsep))
		|| !desc->wMaxPacketSize)
		return -ERANGE;

	hsudc = hsep->dev;
	if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&hsudc->lock, flags);
	set_index(hsudc, hsep->bEndpointAddress);

	/* Interrupt EPs get IEMS mode, others dual-fifo (DUEN). */
	ecr |= ((usb_endpoint_xfer_int(desc)) ? S3C_ECR_IEMS : S3C_ECR_DUEN);
	writel(ecr, hsudc->regs + S3C_ECR);

	hsep->stopped = hsep->wedge = 0;
	hsep->ep.desc = desc;
	hsep->ep.maxpacket = usb_endpoint_maxp(desc);

	s3c_hsudc_set_halt(_ep, 0);
	/* Unmask this endpoint's interrupt. */
	__set_bit(ep_index(hsep), hsudc->regs + S3C_EIER);

	spin_unlock_irqrestore(&hsudc->lock, flags);
	return 0;
}

/**
 * s3c_hsudc_ep_disable - Disable a endpoint.
 * @_ep: The endpoint to be disabled.
 * @desc: Endpoint descriptor.
 *
 * Disables a endpoint when called from the gadget driver.
 */
static int s3c_hsudc_ep_disable(struct usb_ep *_ep)
{
	struct s3c_hsudc_ep *hsep = our_ep(_ep);
	struct s3c_hsudc *hsudc = hsep->dev;
	unsigned long flags;

	if (!_ep || !hsep->ep.desc)
		return -EINVAL;

	spin_lock_irqsave(&hsudc->lock, flags);
	set_index(hsudc, hsep->bEndpointAddress);
	/* Mask this endpoint's interrupt before tearing it down. */
	__clear_bit(ep_index(hsep), hsudc->regs + S3C_EIER);

	s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);

	hsep->ep.desc = NULL;
	hsep->stopped = 1;

	spin_unlock_irqrestore(&hsudc->lock, flags);
	return 0;
}

/**
 * s3c_hsudc_alloc_request - Allocate a new request.
 * @_ep: Endpoint for which request is allocated (not used).
 * @gfp_flags: Flags used for the allocation.
 *
 * Allocates a single transfer request structure when called from gadget driver.
*/ static struct usb_request *s3c_hsudc_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct s3c_hsudc_req *hsreq; hsreq = kzalloc(sizeof(*hsreq), gfp_flags); if (!hsreq) return NULL; INIT_LIST_HEAD(&hsreq->queue); return &hsreq->req; } /** * s3c_hsudc_free_request - Deallocate a request. * @ep: Endpoint for which request is deallocated (not used). * @_req: Request to be deallocated. * * Allocates a single transfer request structure when called from gadget driver. */ static void s3c_hsudc_free_request(struct usb_ep *ep, struct usb_request *_req) { struct s3c_hsudc_req *hsreq; hsreq = our_req(_req); WARN_ON(!list_empty(&hsreq->queue)); kfree(hsreq); } /** * s3c_hsudc_queue - Queue a transfer request for the endpoint. * @_ep: Endpoint for which the request is queued. * @_req: Request to be queued. * @gfp_flags: Not used. * * Start or enqueue a request for a endpoint when called from gadget driver. */ static int s3c_hsudc_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct s3c_hsudc_req *hsreq; struct s3c_hsudc_ep *hsep; struct s3c_hsudc *hsudc; unsigned long flags; u32 offset; u32 csr; hsreq = our_req(_req); if ((!_req || !_req->complete || !_req->buf || !list_empty(&hsreq->queue))) return -EINVAL; hsep = our_ep(_ep); hsudc = hsep->dev; if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; spin_lock_irqsave(&hsudc->lock, flags); set_index(hsudc, hsep->bEndpointAddress); _req->status = -EINPROGRESS; _req->actual = 0; if (!ep_index(hsep) && _req->length == 0) { hsudc->ep0state = WAIT_FOR_SETUP; s3c_hsudc_complete_request(hsep, hsreq, 0); spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } if (list_empty(&hsep->queue) && !hsep->stopped) { offset = (ep_index(hsep)) ? 
S3C_ESR : S3C_EP0SR; if (ep_is_in(hsep)) { csr = readl(hsudc->regs + offset); if (!(csr & S3C_ESR_TX_SUCCESS) && (s3c_hsudc_write_fifo(hsep, hsreq) == 1)) hsreq = NULL; } else { csr = readl(hsudc->regs + offset); if ((csr & S3C_ESR_RX_SUCCESS) && (s3c_hsudc_read_fifo(hsep, hsreq) == 1)) hsreq = NULL; } } if (hsreq) list_add_tail(&hsreq->queue, &hsep->queue); spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } /** * s3c_hsudc_dequeue - Dequeue a transfer request from an endpoint. * @_ep: Endpoint from which the request is dequeued. * @_req: Request to be dequeued. * * Dequeue a request from a endpoint when called from gadget driver. */ static int s3c_hsudc_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct s3c_hsudc_ep *hsep = our_ep(_ep); struct s3c_hsudc *hsudc = hsep->dev; struct s3c_hsudc_req *hsreq; unsigned long flags; hsep = our_ep(_ep); if (!_ep || hsep->ep.name == ep0name) return -EINVAL; spin_lock_irqsave(&hsudc->lock, flags); list_for_each_entry(hsreq, &hsep->queue, queue) { if (&hsreq->req == _req) break; } if (&hsreq->req != _req) { spin_unlock_irqrestore(&hsudc->lock, flags); return -EINVAL; } set_index(hsudc, hsep->bEndpointAddress); s3c_hsudc_complete_request(hsep, hsreq, -ECONNRESET); spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } static struct usb_ep_ops s3c_hsudc_ep_ops = { .enable = s3c_hsudc_ep_enable, .disable = s3c_hsudc_ep_disable, .alloc_request = s3c_hsudc_alloc_request, .free_request = s3c_hsudc_free_request, .queue = s3c_hsudc_queue, .dequeue = s3c_hsudc_dequeue, .set_halt = s3c_hsudc_set_halt, .set_wedge = s3c_hsudc_set_wedge, }; /** * s3c_hsudc_initep - Initialize a endpoint to default state. * @hsudc - Reference to the device controller. * @hsep - Endpoint to be initialized. * @epnum - Address to be assigned to the endpoint. * * Initialize a endpoint with default configuration. 
*/ static void s3c_hsudc_initep(struct s3c_hsudc *hsudc, struct s3c_hsudc_ep *hsep, int epnum) { char *dir; if ((epnum % 2) == 0) { dir = "out"; } else { dir = "in"; hsep->bEndpointAddress = USB_DIR_IN; } hsep->bEndpointAddress |= epnum; if (epnum) snprintf(hsep->name, sizeof(hsep->name), "ep%d%s", epnum, dir); else snprintf(hsep->name, sizeof(hsep->name), "%s", ep0name); INIT_LIST_HEAD(&hsep->queue); INIT_LIST_HEAD(&hsep->ep.ep_list); if (epnum) list_add_tail(&hsep->ep.ep_list, &hsudc->gadget.ep_list); hsep->dev = hsudc; hsep->ep.name = hsep->name; hsep->ep.maxpacket = epnum ? 512 : 64; hsep->ep.ops = &s3c_hsudc_ep_ops; hsep->fifo = hsudc->regs + S3C_BR(epnum); hsep->ep.desc = NULL; hsep->stopped = 0; hsep->wedge = 0; set_index(hsudc, epnum); writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR); } /** * s3c_hsudc_setup_ep - Configure all endpoints to default state. * @hsudc: Reference to device controller. * * Configures all endpoints to default state. */ static void s3c_hsudc_setup_ep(struct s3c_hsudc *hsudc) { int epnum; hsudc->ep0state = WAIT_FOR_SETUP; INIT_LIST_HEAD(&hsudc->gadget.ep_list); for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) s3c_hsudc_initep(hsudc, &hsudc->ep[epnum], epnum); } /** * s3c_hsudc_reconfig - Reconfigure the device controller to default state. * @hsudc: Reference to device controller. * * Reconfigures the device controller registers to a default state. */ static void s3c_hsudc_reconfig(struct s3c_hsudc *hsudc) { writel(0xAA, hsudc->regs + S3C_EDR); writel(1, hsudc->regs + S3C_EIER); writel(0, hsudc->regs + S3C_TR); writel(S3C_SCR_DTZIEN_EN | S3C_SCR_RRD_EN | S3C_SCR_SUS_EN | S3C_SCR_RST_EN, hsudc->regs + S3C_SCR); writel(0, hsudc->regs + S3C_EP0CR); s3c_hsudc_setup_ep(hsudc); } /** * s3c_hsudc_irq - Interrupt handler for device controller. * @irq: Not used. * @_dev: Reference to the device controller. * * Interrupt handler for the device controller. This handler handles controller * interrupts and endpoint interrupts. 
*/ static irqreturn_t s3c_hsudc_irq(int irq, void *_dev) { struct s3c_hsudc *hsudc = _dev; struct s3c_hsudc_ep *hsep; u32 ep_intr; u32 sys_status; u32 ep_idx; spin_lock(&hsudc->lock); sys_status = readl(hsudc->regs + S3C_SSR); ep_intr = readl(hsudc->regs + S3C_EIR) & 0x3FF; if (!ep_intr && !(sys_status & S3C_SSR_DTZIEN_EN)) { spin_unlock(&hsudc->lock); return IRQ_HANDLED; } if (sys_status) { if (sys_status & S3C_SSR_VBUSON) writel(S3C_SSR_VBUSON, hsudc->regs + S3C_SSR); if (sys_status & S3C_SSR_ERR) writel(S3C_SSR_ERR, hsudc->regs + S3C_SSR); if (sys_status & S3C_SSR_SDE) { writel(S3C_SSR_SDE, hsudc->regs + S3C_SSR); hsudc->gadget.speed = (sys_status & S3C_SSR_HSP) ? USB_SPEED_HIGH : USB_SPEED_FULL; } if (sys_status & S3C_SSR_SUSPEND) { writel(S3C_SSR_SUSPEND, hsudc->regs + S3C_SSR); if (hsudc->gadget.speed != USB_SPEED_UNKNOWN && hsudc->driver && hsudc->driver->suspend) hsudc->driver->suspend(&hsudc->gadget); } if (sys_status & S3C_SSR_RESUME) { writel(S3C_SSR_RESUME, hsudc->regs + S3C_SSR); if (hsudc->gadget.speed != USB_SPEED_UNKNOWN && hsudc->driver && hsudc->driver->resume) hsudc->driver->resume(&hsudc->gadget); } if (sys_status & S3C_SSR_RESET) { writel(S3C_SSR_RESET, hsudc->regs + S3C_SSR); for (ep_idx = 0; ep_idx < hsudc->pd->epnum; ep_idx++) { hsep = &hsudc->ep[ep_idx]; hsep->stopped = 1; s3c_hsudc_nuke_ep(hsep, -ECONNRESET); } s3c_hsudc_reconfig(hsudc); hsudc->ep0state = WAIT_FOR_SETUP; } } if (ep_intr & S3C_EIR_EP0) { writel(S3C_EIR_EP0, hsudc->regs + S3C_EIR); set_index(hsudc, 0); s3c_hsudc_handle_ep0_intr(hsudc); } ep_intr >>= 1; ep_idx = 1; while (ep_intr) { if (ep_intr & 1) { hsep = &hsudc->ep[ep_idx]; set_index(hsudc, ep_idx); writel(1 << ep_idx, hsudc->regs + S3C_EIR); if (ep_is_in(hsep)) s3c_hsudc_epin_intr(hsudc, ep_idx); else s3c_hsudc_epout_intr(hsudc, ep_idx); } ep_intr >>= 1; ep_idx++; } spin_unlock(&hsudc->lock); return IRQ_HANDLED; } static int s3c_hsudc_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct s3c_hsudc 
*hsudc = to_hsudc(gadget); int ret; if (!driver || driver->max_speed < USB_SPEED_FULL || !driver->setup) return -EINVAL; if (!hsudc) return -ENODEV; if (hsudc->driver) return -EBUSY; hsudc->driver = driver; ret = regulator_bulk_enable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies); if (ret != 0) { dev_err(hsudc->dev, "failed to enable supplies: %d\n", ret); goto err_supplies; } /* connect to bus through transceiver */ if (!IS_ERR_OR_NULL(hsudc->transceiver)) { ret = otg_set_peripheral(hsudc->transceiver->otg, &hsudc->gadget); if (ret) { dev_err(hsudc->dev, "%s: can't bind to transceiver\n", hsudc->gadget.name); goto err_otg; } } enable_irq(hsudc->irq); dev_info(hsudc->dev, "bound driver %s\n", driver->driver.name); s3c_hsudc_reconfig(hsudc); pm_runtime_get_sync(hsudc->dev); s3c_hsudc_init_phy(); if (hsudc->pd->gpio_init) hsudc->pd->gpio_init(); return 0; err_otg: regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies); err_supplies: hsudc->driver = NULL; return ret; } static int s3c_hsudc_stop(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { struct s3c_hsudc *hsudc = to_hsudc(gadget); unsigned long flags; if (!hsudc) return -ENODEV; if (!driver || driver != hsudc->driver) return -EINVAL; spin_lock_irqsave(&hsudc->lock, flags); hsudc->driver = NULL; hsudc->gadget.speed = USB_SPEED_UNKNOWN; s3c_hsudc_uninit_phy(); pm_runtime_put(hsudc->dev); if (hsudc->pd->gpio_uninit) hsudc->pd->gpio_uninit(); s3c_hsudc_stop_activity(hsudc); spin_unlock_irqrestore(&hsudc->lock, flags); if (!IS_ERR_OR_NULL(hsudc->transceiver)) (void) otg_set_peripheral(hsudc->transceiver->otg, NULL); disable_irq(hsudc->irq); regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies); dev_info(hsudc->dev, "unregistered gadget driver '%s'\n", driver->driver.name); return 0; } static inline u32 s3c_hsudc_read_frameno(struct s3c_hsudc *hsudc) { return readl(hsudc->regs + S3C_FNR) & 0x3FF; } static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget) { return 
s3c_hsudc_read_frameno(to_hsudc(gadget)); } static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA) { struct s3c_hsudc *hsudc = to_hsudc(gadget); if (!hsudc) return -ENODEV; if (!IS_ERR_OR_NULL(hsudc->transceiver)) return usb_phy_set_power(hsudc->transceiver, mA); return -EOPNOTSUPP; } static const struct usb_gadget_ops s3c_hsudc_gadget_ops = { .get_frame = s3c_hsudc_gadget_getframe, .udc_start = s3c_hsudc_start, .udc_stop = s3c_hsudc_stop, .vbus_draw = s3c_hsudc_vbus_draw, }; static int s3c_hsudc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct s3c_hsudc *hsudc; struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev); int ret, i; hsudc = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsudc) + sizeof(struct s3c_hsudc_ep) * pd->epnum, GFP_KERNEL); if (!hsudc) { dev_err(dev, "cannot allocate memory\n"); return -ENOMEM; } platform_set_drvdata(pdev, dev); hsudc->dev = dev; hsudc->pd = dev_get_platdata(&pdev->dev); hsudc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2); for (i = 0; i < ARRAY_SIZE(hsudc->supplies); i++) hsudc->supplies[i].supply = s3c_hsudc_supply_names[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies), hsudc->supplies); if (ret != 0) { dev_err(dev, "failed to request supplies: %d\n", ret); goto err_supplies; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hsudc->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hsudc->regs)) { ret = PTR_ERR(hsudc->regs); goto err_res; } spin_lock_init(&hsudc->lock); hsudc->gadget.max_speed = USB_SPEED_HIGH; hsudc->gadget.ops = &s3c_hsudc_gadget_ops; hsudc->gadget.name = dev_name(dev); hsudc->gadget.ep0 = &hsudc->ep[0].ep; hsudc->gadget.is_otg = 0; hsudc->gadget.is_a_peripheral = 0; hsudc->gadget.speed = USB_SPEED_UNKNOWN; s3c_hsudc_setup_ep(hsudc); ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(dev, "unable to obtain IRQ number\n"); goto err_res; } hsudc->irq = ret; ret = devm_request_irq(&pdev->dev, hsudc->irq, 
s3c_hsudc_irq, 0, driver_name, hsudc); if (ret < 0) { dev_err(dev, "irq request failed\n"); goto err_res; } hsudc->uclk = devm_clk_get(&pdev->dev, "usb-device"); if (IS_ERR(hsudc->uclk)) { dev_err(dev, "failed to find usb-device clock source\n"); ret = PTR_ERR(hsudc->uclk); goto err_res; } clk_enable(hsudc->uclk); local_irq_disable(); disable_irq(hsudc->irq); local_irq_enable(); ret = usb_add_gadget_udc(&pdev->dev, &hsudc->gadget); if (ret) goto err_add_udc; pm_runtime_enable(dev); return 0; err_add_udc: err_add_device: clk_disable(hsudc->uclk); err_res: if (!IS_ERR_OR_NULL(hsudc->transceiver)) usb_put_phy(hsudc->transceiver); err_supplies: return ret; } static struct platform_driver s3c_hsudc_driver = { .driver = { .owner = THIS_MODULE, .name = "s3c-hsudc", }, .probe = s3c_hsudc_probe, }; module_platform_driver(s3c_hsudc_driver); MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver"); MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:s3c-hsudc");
gpl-2.0
insop/sched-deadline2
drivers/usb/gadget/f_subset.c
161
14177
/* * f_subset.c -- "CDC Subset" Ethernet link function driver * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2008 Nokia Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/etherdevice.h> #include "u_ether.h" /* * This function packages a simple "CDC Subset" Ethernet port with no real * control mechanisms; just raw data transfer over two bulk endpoints. * The data transfer model is exactly that of CDC Ethernet, which is * why we call it the "CDC Subset". * * Because it's not standardized, this has some interoperability issues. * They mostly relate to driver binding, since the data transfer model is * so simple (CDC Ethernet). The original versions of this protocol used * specific product/vendor IDs: byteswapped IDs for Digital Equipment's * SA-1100 "Itsy" board, which could run Linux 2.4 kernels and supported * daughtercards with USB peripheral connectors. (It was used more often * with other boards, using the Itsy identifiers.) Linux hosts recognized * this with CONFIG_USB_ARMLINUX; these devices have only one configuration * and one interface. * * At some point, MCCI defined a (nonconformant) CDC MDLM variant called * "SAFE", which happens to have a mode which is identical to the "CDC * Subset" in terms of data transfer and lack of control model. This was * adopted by later Sharp Zaurus models, and by some other software which * Linux hosts recognize with CONFIG_USB_NET_ZAURUS. * * Because Microsoft's RNDIS drivers are far from robust, we added a few * descriptors to the CDC Subset code, making this code look like a SAFE * implementation. This lets you use MCCI's host side MS-Windows drivers * if you get fed up with RNDIS. 
It also makes it easier for composite
 * drivers to work, since they can use class based binding instead of
 * caring about specific product and vendor IDs.
 */

/* One CDC-subset network link instance: the generic USB-Ethernet port
 * state plus the host MAC address rendered as a hex string for the
 * iMACAddress string descriptor.
 */
struct f_gether {
	struct gether			port;
	char				ethaddr[14];	/* 12 hex digits + NUL */
};

/* Map the composite framework's usb_function back to our instance. */
static inline struct f_gether *func_to_geth(struct usb_function *f)
{
	return container_of(f, struct f_gether, port.func);
}

/*-------------------------------------------------------------------------*/

/*
 * "Simple" CDC-subset option is a simple vendor-neutral model that most
 * full speed controllers can handle:  one interface, two bulk endpoints.
 * To assist host side drivers, we fancy it up a bit, and add descriptors so
 * some host side drivers will understand it as a "SAFE" variant.
 *
 * "SAFE" loosely follows CDC WMC MDLM, violating the spec in various ways.
 * Data endpoints live in the control interface, there's no data interface.
 * And it's not used to talk to a cell phone radio.
 */

/* interface descriptor: */

static struct usb_interface_descriptor subset_data_intf __initdata = {
	.bLength =		sizeof subset_data_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	.bAlternateSetting =	0,
	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_COMM,
	.bInterfaceSubClass =	USB_CDC_SUBCLASS_MDLM,
	.bInterfaceProtocol =	0,
	/* .iInterface = DYNAMIC */
};

static struct usb_cdc_header_desc mdlm_header_desc __initdata = {
	.bLength =		sizeof mdlm_header_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,

	.bcdCDC =		cpu_to_le16(0x0110),
};

static struct usb_cdc_mdlm_desc mdlm_desc __initdata = {
	.bLength =		sizeof mdlm_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_MDLM_TYPE,

	.bcdVersion =		cpu_to_le16(0x0100),
	/* GUID identifying the "SAFE" MDLM model to host drivers */
	.bGUID = {
		0x5d, 0x34, 0xcf, 0x66, 0x11, 0x18, 0x11, 0xd6,
		0xa2, 0x1a, 0x00, 0x01, 0x02, 0xca, 0x9a, 0x7f,
	},
};

/* since "usb_cdc_mdlm_detail_desc" is a variable length structure, we
 * can't really use its struct.  All we do here is say that we're using
 * the submode of "SAFE" which directly matches the CDC Subset.
 */
static u8 mdlm_detail_desc[] __initdata = {
	6,
	USB_DT_CS_INTERFACE,
	USB_CDC_MDLM_DETAIL_TYPE,

	0,	/* "SAFE" */
	0,	/* network control capabilities (none) */
	0,	/* network data capabilities ("raw" encapsulation) */
};

static struct usb_cdc_ether_desc ether_desc __initdata = {
	.bLength =		sizeof ether_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,

	/* this descriptor actually adds value, surprise! */
	/* .iMACAddress = DYNAMIC */
	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
	.wNumberMCFilters =	cpu_to_le16(0),
	.bNumberPowerFilters =	0,
};

/* full speed support: */

static struct usb_endpoint_descriptor fs_subset_in_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor fs_subset_out_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *fs_eth_function[] __initdata = {
	(struct usb_descriptor_header *) &subset_data_intf,
	(struct usb_descriptor_header *) &mdlm_header_desc,
	(struct usb_descriptor_header *) &mdlm_desc,
	(struct usb_descriptor_header *) &mdlm_detail_desc,
	(struct usb_descriptor_header *) &ether_desc,
	(struct usb_descriptor_header *) &fs_subset_in_desc,
	(struct usb_descriptor_header *) &fs_subset_out_desc,
	NULL,
};

/* high speed support: */

/* endpoint addresses are copied from the fs descriptors in geth_bind() */
static struct usb_endpoint_descriptor hs_subset_in_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor hs_subset_out_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *hs_eth_function[] __initdata = {
	(struct usb_descriptor_header *) &subset_data_intf,
	(struct usb_descriptor_header *) &mdlm_header_desc,
	(struct usb_descriptor_header *) &mdlm_desc,
	(struct usb_descriptor_header *) &mdlm_detail_desc,
	(struct usb_descriptor_header *) &ether_desc,
	(struct usb_descriptor_header *) &hs_subset_in_desc,
	(struct usb_descriptor_header *) &hs_subset_out_desc,
	NULL,
};

/* super speed support: */

static struct usb_endpoint_descriptor ss_subset_in_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_endpoint_descriptor ss_subset_out_desc __initdata = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

/* one companion descriptor is shared by both bulk endpoints */
static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc __initdata = {
	.bLength =		sizeof ss_subset_bulk_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_descriptor_header *ss_eth_function[] __initdata = {
	(struct usb_descriptor_header *) &subset_data_intf,
	(struct usb_descriptor_header *) &mdlm_header_desc,
	(struct usb_descriptor_header *) &mdlm_desc,
	(struct usb_descriptor_header *) &mdlm_detail_desc,
	(struct usb_descriptor_header *) &ether_desc,
	(struct usb_descriptor_header *) &ss_subset_in_desc,
	(struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
	(struct usb_descriptor_header *) &ss_subset_out_desc,
	(struct usb_descriptor_header *) &ss_subset_bulk_comp_desc,
	NULL,
};

/* string descriptors: */

static struct usb_string geth_string_defs[] = {
	[0].s = "CDC Ethernet Subset/SAFE",
	[1].s = NULL /* DYNAMIC */,
	{  } /* end of list */
};

static struct usb_gadget_strings geth_string_table = {
	.language =		0x0409,	/* en-us */
	.strings =		geth_string_defs,
};

static struct usb_gadget_strings *geth_strings[] = {
	&geth_string_table,
	NULL,
};

/*-------------------------------------------------------------------------*/

/* Activate (or reset) the network link; the subset model has only alt 0. */
static int geth_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_gether		*geth = func_to_geth(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct net_device	*net;

	/* we know alt == 0, so this is an activation or a reset */

	/* a non-NULL driver_data on in_ep means the link is already up */
	if (geth->port.in_ep->driver_data) {
		DBG(cdev, "reset cdc subset\n");
		gether_disconnect(&geth->port);
	}

	DBG(cdev, "init + activate cdc subset\n");
	if (config_ep_by_speed(cdev->gadget, f, geth->port.in_ep) ||
	    config_ep_by_speed(cdev->gadget, f, geth->port.out_ep)) {
		geth->port.in_ep->desc = NULL;
		geth->port.out_ep->desc = NULL;
		return -EINVAL;
	}
	net = gether_connect(&geth->port);
	return IS_ERR(net) ? PTR_ERR(net) : 0;
}

/* Tear down the network link when the host deconfigures us. */
static void geth_disable(struct usb_function *f)
{
	struct f_gether	*geth = func_to_geth(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	DBG(cdev, "net deactivated\n");
	gether_disconnect(&geth->port);
}

/*-------------------------------------------------------------------------*/

/* serial function driver setup/binding */

/* Allocate interface id and endpoints, then build per-speed descriptor
 * sets.  Runs once per instance at configuration bind time; the network
 * link itself stays down until geth_set_alt().
 */
static int __init
geth_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_gether		*geth = func_to_geth(f);
	int			status;
	struct usb_ep		*ep;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	subset_data_intf.bInterfaceNumber = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_in_desc);
	if (!ep)
		goto fail;
	geth->port.in_ep = ep;
	ep->driver_data = cdev;	/* claim */

	ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_out_desc);
	if (!ep)
		goto fail;
	geth->port.out_ep = ep;
	ep->driver_data = cdev;	/* claim */

	/* copy descriptors, and track endpoint copies */
	f->descriptors = usb_copy_descriptors(fs_eth_function);
	if (!f->descriptors)
		goto fail;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		hs_subset_in_desc.bEndpointAddress =
				fs_subset_in_desc.bEndpointAddress;
		hs_subset_out_desc.bEndpointAddress =
				fs_subset_out_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(hs_eth_function);
		if (!f->hs_descriptors)
			goto fail;
	}

	if (gadget_is_superspeed(c->cdev->gadget)) {
		ss_subset_in_desc.bEndpointAddress =
				fs_subset_in_desc.bEndpointAddress;
		ss_subset_out_desc.bEndpointAddress =
				fs_subset_out_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->ss_descriptors = usb_copy_descriptors(ss_eth_function);
		if (!f->ss_descriptors)
			goto fail;
	}

	/* NOTE:  all that is done without knowing or caring about
	 * the network link ... which is unavailable to this code
	 * until we're activated via set_alt().
	 */

	DBG(cdev, "CDC Subset: %s speed IN/%s OUT/%s\n",
			gadget_is_superspeed(c->cdev->gadget) ? "super" :
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			geth->port.in_ep->name, geth->port.out_ep->name);
	return 0;

fail:
	/* unwind: free whatever descriptor sets were built */
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
	if (f->hs_descriptors)
		usb_free_descriptors(f->hs_descriptors);

	/* we might as well release our claims on endpoints */
	if (geth->port.out_ep->desc)
		geth->port.out_ep->driver_data = NULL;
	if (geth->port.in_ep->desc)
		geth->port.in_ep->driver_data = NULL;

	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}

/* Release descriptor copies and the instance itself. */
static void
geth_unbind(struct usb_configuration *c, struct usb_function *f)
{
	if (gadget_is_superspeed(c->cdev->gadget))
		usb_free_descriptors(f->ss_descriptors);
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);
	geth_string_defs[1].s = NULL;
	kfree(func_to_geth(f));
}

/**
 * geth_bind_config - add CDC Subset network link to a configuration
 * @c: the configuration to support the network link
 * @ethaddr: a buffer in which the ethernet address of the host side
 *	side of the link was recorded
 * Context: single threaded during gadget setup
 *
 * Returns zero on success, else negative errno.
 *
 * Caller must have called @gether_setup().  Caller is also responsible
 * for calling @gether_cleanup() before module unload.
 */
int __init geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
{
	struct f_gether	*geth;
	int		status;

	if (!ethaddr)
		return -EINVAL;

	/* maybe allocate device-global string IDs */
	if (geth_string_defs[0].id == 0) {

		/* interface label */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		geth_string_defs[0].id = status;
		subset_data_intf.iInterface = status;

		/* MAC address */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		geth_string_defs[1].id = status;
		ether_desc.iMACAddress = status;
	}

	/* allocate and initialize one new instance */
	geth = kzalloc(sizeof *geth, GFP_KERNEL);
	if (!geth)
		return -ENOMEM;

	/* export host's Ethernet address in CDC format */
	snprintf(geth->ethaddr, sizeof geth->ethaddr,
		"%02X%02X%02X%02X%02X%02X",
		ethaddr[0], ethaddr[1], ethaddr[2],
		ethaddr[3], ethaddr[4], ethaddr[5]);
	geth_string_defs[1].s = geth->ethaddr;

	geth->port.cdc_filter = DEFAULT_FILTER;

	geth->port.func.name = "cdc_subset";
	geth->port.func.strings = geth_strings;
	geth->port.func.bind = geth_bind;
	geth->port.func.unbind = geth_unbind;
	geth->port.func.set_alt = geth_set_alt;
	geth->port.func.disable = geth_disable;

	status = usb_add_function(c, &geth->port.func);
	if (status) {
		geth_string_defs[1].s = NULL;
		kfree(geth);
	}
	return status;
}
gpl-2.0
arm000/linux-2.6
drivers/rtc/rtc-jz4740.c
161
9051
/* * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> * Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net> * JZ4740 SoC RTC driver * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/spinlock.h> #define JZ_REG_RTC_CTRL 0x00 #define JZ_REG_RTC_SEC 0x04 #define JZ_REG_RTC_SEC_ALARM 0x08 #define JZ_REG_RTC_REGULATOR 0x0C #define JZ_REG_RTC_HIBERNATE 0x20 #define JZ_REG_RTC_SCRATCHPAD 0x34 #define JZ_RTC_CTRL_WRDY BIT(7) #define JZ_RTC_CTRL_1HZ BIT(6) #define JZ_RTC_CTRL_1HZ_IRQ BIT(5) #define JZ_RTC_CTRL_AF BIT(4) #define JZ_RTC_CTRL_AF_IRQ BIT(3) #define JZ_RTC_CTRL_AE BIT(2) #define JZ_RTC_CTRL_ENABLE BIT(0) struct jz4740_rtc { struct resource *mem; void __iomem *base; struct rtc_device *rtc; unsigned int irq; spinlock_t lock; }; static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg) { return readl(rtc->base + reg); } static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc) { uint32_t ctrl; int timeout = 1000; do { ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL); } while (!(ctrl & JZ_RTC_CTRL_WRDY) && --timeout); return timeout ? 
0 : -EIO; } static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg, uint32_t val) { int ret; ret = jz4740_rtc_wait_write_ready(rtc); if (ret == 0) writel(val, rtc->base + reg); return ret; } static int jz4740_rtc_ctrl_set_bits(struct jz4740_rtc *rtc, uint32_t mask, bool set) { int ret; unsigned long flags; uint32_t ctrl; spin_lock_irqsave(&rtc->lock, flags); ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL); /* Don't clear interrupt flags by accident */ ctrl |= JZ_RTC_CTRL_1HZ | JZ_RTC_CTRL_AF; if (set) ctrl |= mask; else ctrl &= ~mask; ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_CTRL, ctrl); spin_unlock_irqrestore(&rtc->lock, flags); return ret; } static int jz4740_rtc_read_time(struct device *dev, struct rtc_time *time) { struct jz4740_rtc *rtc = dev_get_drvdata(dev); uint32_t secs, secs2; int timeout = 5; /* If the seconds register is read while it is updated, it can contain a * bogus value. This can be avoided by making sure that two consecutive * reads have the same value. 
*/ secs = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC); secs2 = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC); while (secs != secs2 && --timeout) { secs = secs2; secs2 = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC); } if (timeout == 0) return -EIO; rtc_time_to_tm(secs, time); return rtc_valid_tm(time); } static int jz4740_rtc_set_mmss(struct device *dev, unsigned long secs) { struct jz4740_rtc *rtc = dev_get_drvdata(dev); return jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, secs); } static int jz4740_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct jz4740_rtc *rtc = dev_get_drvdata(dev); uint32_t secs; uint32_t ctrl; secs = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC_ALARM); ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL); alrm->enabled = !!(ctrl & JZ_RTC_CTRL_AE); alrm->pending = !!(ctrl & JZ_RTC_CTRL_AF); rtc_time_to_tm(secs, &alrm->time); return rtc_valid_tm(&alrm->time); } static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { int ret; struct jz4740_rtc *rtc = dev_get_drvdata(dev); unsigned long secs; rtc_tm_to_time(&alrm->time, &secs); ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC_ALARM, secs); if (!ret) ret = jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AE | JZ_RTC_CTRL_AF_IRQ, alrm->enabled); return ret; } static int jz4740_rtc_update_irq_enable(struct device *dev, unsigned int enable) { struct jz4740_rtc *rtc = dev_get_drvdata(dev); return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ_IRQ, enable); } static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) { struct jz4740_rtc *rtc = dev_get_drvdata(dev); return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable); } static struct rtc_class_ops jz4740_rtc_ops = { .read_time = jz4740_rtc_read_time, .set_mmss = jz4740_rtc_set_mmss, .read_alarm = jz4740_rtc_read_alarm, .set_alarm = jz4740_rtc_set_alarm, .update_irq_enable = jz4740_rtc_update_irq_enable, .alarm_irq_enable = jz4740_rtc_alarm_irq_enable, }; static irqreturn_t jz4740_rtc_irq(int irq, void *data) { 
struct jz4740_rtc *rtc = data; uint32_t ctrl; unsigned long events = 0; ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL); if (ctrl & JZ_RTC_CTRL_1HZ) events |= (RTC_UF | RTC_IRQF); if (ctrl & JZ_RTC_CTRL_AF) events |= (RTC_AF | RTC_IRQF); rtc_update_irq(rtc->rtc, 1, events); jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ | JZ_RTC_CTRL_AF, false); return IRQ_HANDLED; } void jz4740_rtc_poweroff(struct device *dev) { struct jz4740_rtc *rtc = dev_get_drvdata(dev); jz4740_rtc_reg_write(rtc, JZ_REG_RTC_HIBERNATE, 1); } EXPORT_SYMBOL_GPL(jz4740_rtc_poweroff); static int __devinit jz4740_rtc_probe(struct platform_device *pdev) { int ret; struct jz4740_rtc *rtc; uint32_t scratchpad; rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); if (!rtc) return -ENOMEM; rtc->irq = platform_get_irq(pdev, 0); if (rtc->irq < 0) { ret = -ENOENT; dev_err(&pdev->dev, "Failed to get platform irq\n"); goto err_free; } rtc->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!rtc->mem) { ret = -ENOENT; dev_err(&pdev->dev, "Failed to get platform mmio memory\n"); goto err_free; } rtc->mem = request_mem_region(rtc->mem->start, resource_size(rtc->mem), pdev->name); if (!rtc->mem) { ret = -EBUSY; dev_err(&pdev->dev, "Failed to request mmio memory region\n"); goto err_free; } rtc->base = ioremap_nocache(rtc->mem->start, resource_size(rtc->mem)); if (!rtc->base) { ret = -EBUSY; dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); goto err_release_mem_region; } spin_lock_init(&rtc->lock); platform_set_drvdata(pdev, rtc); device_init_wakeup(&pdev->dev, 1); rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &jz4740_rtc_ops, THIS_MODULE); if (IS_ERR(rtc->rtc)) { ret = PTR_ERR(rtc->rtc); dev_err(&pdev->dev, "Failed to register rtc device: %d\n", ret); goto err_iounmap; } ret = request_irq(rtc->irq, jz4740_rtc_irq, 0, pdev->name, rtc); if (ret) { dev_err(&pdev->dev, "Failed to request rtc irq: %d\n", ret); goto err_unregister_rtc; } scratchpad = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SCRATCHPAD); if 
(scratchpad != 0x12345678) { ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678); ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0); if (ret) { dev_err(&pdev->dev, "Could not write write to RTC registers\n"); goto err_free_irq; } } return 0; err_free_irq: free_irq(rtc->irq, rtc); err_unregister_rtc: rtc_device_unregister(rtc->rtc); err_iounmap: platform_set_drvdata(pdev, NULL); iounmap(rtc->base); err_release_mem_region: release_mem_region(rtc->mem->start, resource_size(rtc->mem)); err_free: kfree(rtc); return ret; } static int __devexit jz4740_rtc_remove(struct platform_device *pdev) { struct jz4740_rtc *rtc = platform_get_drvdata(pdev); free_irq(rtc->irq, rtc); rtc_device_unregister(rtc->rtc); iounmap(rtc->base); release_mem_region(rtc->mem->start, resource_size(rtc->mem)); kfree(rtc); platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM static int jz4740_rtc_suspend(struct device *dev) { struct jz4740_rtc *rtc = dev_get_drvdata(dev); if (device_may_wakeup(dev)) enable_irq_wake(rtc->irq); return 0; } static int jz4740_rtc_resume(struct device *dev) { struct jz4740_rtc *rtc = dev_get_drvdata(dev); if (device_may_wakeup(dev)) disable_irq_wake(rtc->irq); return 0; } static const struct dev_pm_ops jz4740_pm_ops = { .suspend = jz4740_rtc_suspend, .resume = jz4740_rtc_resume, }; #define JZ4740_RTC_PM_OPS (&jz4740_pm_ops) #else #define JZ4740_RTC_PM_OPS NULL #endif /* CONFIG_PM */ struct platform_driver jz4740_rtc_driver = { .probe = jz4740_rtc_probe, .remove = __devexit_p(jz4740_rtc_remove), .driver = { .name = "jz4740-rtc", .owner = THIS_MODULE, .pm = JZ4740_RTC_PM_OPS, }, }; static int __init jz4740_rtc_init(void) { return platform_driver_register(&jz4740_rtc_driver); } module_init(jz4740_rtc_init); static void __exit jz4740_rtc_exit(void) { platform_driver_unregister(&jz4740_rtc_driver); } module_exit(jz4740_rtc_exit); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RTC driver for the JZ4740 
SoC\n"); MODULE_ALIAS("platform:jz4740-rtc");
gpl-2.0
coliby/terasic_MTL
drivers/staging/comedi/drivers/ni_labpc_pci.c
161
3478
/*
 * comedi/drivers/ni_labpc_pci.c
 * Driver for National Instruments Lab-PC PCI-1200
 * Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Driver: ni_labpc_pci
 * Description: National Instruments Lab-PC PCI-1200
 * Devices: (National Instruments) PCI-1200 [ni_pci-1200]
 * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
 * Status: works
 *
 * This is the PCI-specific support split off from the ni_labpc driver.
 *
 * Configuration Options: not applicable, uses PCI auto config
 *
 * NI manuals:
 * 340914a (pci-1200)
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "../comedidev.h"

#include "mite.h"
#include "ni_labpc.h"

/* index into labpc_pci_boards[], carried as pci_device_id driver_data */
enum labpc_pci_boardid {
	BOARD_NI_PCI1200,
};

static const struct labpc_boardinfo labpc_pci_boards[] = {
	[BOARD_NI_PCI1200] = {
		.name			= "ni_pci-1200",
		.ai_speed		= 10000,
		.ai_scan_up		= 1,
		.has_ao			= 1,
		.is_labpc1200		= 1,
		.has_mmio		= 1,
	},
};

/*
 * Enable the PCI device, set up the MITE interface chip, and hand off
 * to the common labpc attach code.  @context is the boardid from the
 * PCI device table.  On failure the comedi core calls our detach, which
 * cleans up whatever was set up here.
 */
static int labpc_pci_auto_attach(struct comedi_device *dev,
				 unsigned long context)
{
	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
	const struct labpc_boardinfo *board = NULL;
	struct labpc_private *devpriv;
	int ret;

	if (context < ARRAY_SIZE(labpc_pci_boards))
		board = &labpc_pci_boards[context];
	if (!board)
		return -ENODEV;
	dev->board_ptr = board;
	dev->board_name = board->name;

	ret = comedi_pci_enable(dev);
	if (ret)
		return ret;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	devpriv->mite = mite_alloc(pcidev);
	if (!devpriv->mite)
		return -ENOMEM;

	ret = mite_setup(devpriv->mite);
	if (ret < 0)
		return ret;

	dev->iobase = (unsigned long)devpriv->mite->daq_io_addr;

	return labpc_common_attach(dev, mite_irq(devpriv->mite), IRQF_SHARED);
}

/* Undo auto_attach: tear down the MITE, the irq, and PCI enable state. */
static void labpc_pci_detach(struct comedi_device *dev)
{
	struct labpc_private *devpriv = dev->private;

	if (devpriv && devpriv->mite) {
		mite_unsetup(devpriv->mite);
		mite_free(devpriv->mite);
	}
	if (dev->irq)
		free_irq(dev->irq, dev);
	comedi_pci_disable(dev);
}

static struct comedi_driver labpc_pci_comedi_driver = {
	.driver_name	= "labpc_pci",
	.module		= THIS_MODULE,
	.auto_attach	= labpc_pci_auto_attach,
	.detach		= labpc_pci_detach,
};

/* 0x161 is the PCI device id of the PCI-1200 */
static DEFINE_PCI_DEVICE_TABLE(labpc_pci_table) = {
	{ PCI_VDEVICE(NI, 0x161), BOARD_NI_PCI1200 },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, labpc_pci_table);

static int labpc_pci_probe(struct pci_dev *dev,
			   const struct pci_device_id *id)
{
	return comedi_pci_auto_config(dev, &labpc_pci_comedi_driver,
				      id->driver_data);
}

static struct pci_driver labpc_pci_driver = {
	.name		= "labpc_pci",
	.id_table	= labpc_pci_table,
	.probe		= labpc_pci_probe,
	.remove		= comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(labpc_pci_comedi_driver, labpc_pci_driver);

MODULE_DESCRIPTION("Comedi: National Instruments Lab-PC PCI-1200 driver");
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_LICENSE("GPL");
gpl-2.0
varund7726/android_kernel_oneplus_msm8974
drivers/usb/serial/usb_wwan.c
417
22867
/* USB Driver layer for GSM modems Copyright (C) 2005 Matthias Urlichs <smurf@smurf.noris.de> This driver is free software; you can redistribute it and/or modify it under the terms of Version 2 of the GNU General Public License as published by the Free Software Foundation. Portions copied from the Keyspan driver by Hugh Blemings <hugh@blemings.org> History: see the git log. Work sponsored by: Sigos GmbH, Germany <info@sigos.de> This driver exists because the "normal" serial driver doesn't work too well with GSM modems. Issues: - data loss -- one single Receive URB is not nearly enough - controlling the baud rate doesn't make sense */ #define DRIVER_VERSION "v0.7.2" #define DRIVER_AUTHOR "Matthias Urlichs <smurf@smurf.noris.de>" #define DRIVER_DESC "USB Driver for GSM modems" #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> #include "usb-wwan.h" static bool debug; void usb_wwan_dtr_rts(struct usb_serial_port *port, int on) { struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; dbg("%s", __func__); intfdata = port->serial->private; if (!intfdata->send_setup) return; portdata = usb_get_serial_port_data(port); /* FIXME: locking */ portdata->rts_state = on; portdata->dtr_state = on; intfdata->send_setup(port); } EXPORT_SYMBOL(usb_wwan_dtr_rts); void usb_wwan_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { struct usb_wwan_intf_private *intfdata = port->serial->private; dbg("%s", __func__); /* Doesn't support option setting */ tty_termios_copy_hw(tty->termios, old_termios); if (intfdata->send_setup) intfdata->send_setup(port); } EXPORT_SYMBOL(usb_wwan_set_termios); int usb_wwan_tiocmget(struct tty_struct *tty) { struct usb_serial_port 
*port = tty->driver_data; unsigned int value; struct usb_wwan_port_private *portdata; portdata = usb_get_serial_port_data(port); value = ((portdata->rts_state) ? TIOCM_RTS : 0) | ((portdata->dtr_state) ? TIOCM_DTR : 0) | ((portdata->cts_state) ? TIOCM_CTS : 0) | ((portdata->dsr_state) ? TIOCM_DSR : 0) | ((portdata->dcd_state) ? TIOCM_CAR : 0) | ((portdata->ri_state) ? TIOCM_RNG : 0); return value; } EXPORT_SYMBOL(usb_wwan_tiocmget); int usb_wwan_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; portdata = usb_get_serial_port_data(port); intfdata = port->serial->private; if (!intfdata->send_setup) return -EINVAL; /* FIXME: what locks portdata fields ? */ if (set & TIOCM_RTS) portdata->rts_state = 1; if (set & TIOCM_DTR) portdata->dtr_state = 1; if (clear & TIOCM_RTS) portdata->rts_state = 0; if (clear & TIOCM_DTR) portdata->dtr_state = 0; return intfdata->send_setup(port); } EXPORT_SYMBOL(usb_wwan_tiocmset); static int get_serial_info(struct usb_serial_port *port, struct serial_struct __user *retinfo) { struct serial_struct tmp; if (!retinfo) return -EFAULT; memset(&tmp, 0, sizeof(tmp)); tmp.line = port->serial->minor; tmp.port = port->number; tmp.baud_base = tty_get_baud_rate(port->port.tty); tmp.close_delay = port->port.close_delay / 10; tmp.closing_wait = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ? ASYNC_CLOSING_WAIT_NONE : port->port.closing_wait / 10; if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) return -EFAULT; return 0; } static int set_serial_info(struct usb_serial_port *port, struct serial_struct __user *newinfo) { struct serial_struct new_serial; unsigned int closing_wait, close_delay; int retval = 0; if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) return -EFAULT; close_delay = new_serial.close_delay * 10; closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ? 
ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10; mutex_lock(&port->port.mutex); if (!capable(CAP_SYS_ADMIN)) { if ((close_delay != port->port.close_delay) || (closing_wait != port->port.closing_wait)) retval = -EPERM; else retval = -EOPNOTSUPP; } else { port->port.close_delay = close_delay; port->port.closing_wait = closing_wait; } mutex_unlock(&port->port.mutex); return retval; } int usb_wwan_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct usb_serial_port *port = tty->driver_data; dbg("%s cmd 0x%04x", __func__, cmd); switch (cmd) { case TIOCGSERIAL: return get_serial_info(port, (struct serial_struct __user *) arg); case TIOCSSERIAL: return set_serial_info(port, (struct serial_struct __user *) arg); default: break; } dbg("%s arg not supported", __func__); return -ENOIOCTLCMD; } EXPORT_SYMBOL(usb_wwan_ioctl); /* Write */ int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; int i; int left, todo; struct urb *this_urb = NULL; /* spurious */ int err; unsigned long flags; portdata = usb_get_serial_port_data(port); intfdata = port->serial->private; dbg("%s: write (%d chars)", __func__, count); i = 0; left = count; for (i = 0; left > 0 && i < N_OUT_URB; i++) { todo = left; if (todo > OUT_BUFLEN) todo = OUT_BUFLEN; this_urb = portdata->out_urbs[i]; if (test_and_set_bit(i, &portdata->out_busy)) { if (time_before(jiffies, portdata->tx_start_time[i] + 10 * HZ)) continue; usb_unlink_urb(this_urb); continue; } dbg("%s: endpoint %d buf %d", __func__, usb_pipeendpoint(this_urb->pipe), i); err = usb_autopm_get_interface_async(port->serial->interface); if (err < 0) break; /* send the data */ memcpy(this_urb->transfer_buffer, buf, todo); this_urb->transfer_buffer_length = todo; spin_lock_irqsave(&intfdata->susp_lock, flags); if (intfdata->suspended) { usb_anchor_urb(this_urb, &portdata->delayed); 
spin_unlock_irqrestore(&intfdata->susp_lock, flags); } else { intfdata->in_flight++; spin_unlock_irqrestore(&intfdata->susp_lock, flags); usb_anchor_urb(this_urb, &portdata->submitted); err = usb_submit_urb(this_urb, GFP_ATOMIC); if (err) { dbg("usb_submit_urb %p (write bulk) failed " "(%d)", this_urb, err); usb_unanchor_urb(this_urb); clear_bit(i, &portdata->out_busy); spin_lock_irqsave(&intfdata->susp_lock, flags); intfdata->in_flight--; spin_unlock_irqrestore(&intfdata->susp_lock, flags); usb_autopm_put_interface_async(port->serial->interface); break; } } portdata->tx_start_time[i] = jiffies; buf += todo; left -= todo; } count -= left; dbg("%s: wrote (did %d)", __func__, count); return count; } EXPORT_SYMBOL(usb_wwan_write); static void usb_wwan_in_work(struct work_struct *w) { struct usb_wwan_port_private *portdata = container_of(w, struct usb_wwan_port_private, in_work); struct usb_wwan_intf_private *intfdata; struct list_head *q = &portdata->in_urb_list; struct urb *urb; unsigned char *data; struct tty_struct *tty; struct usb_serial_port *port; int err; ssize_t len; ssize_t count; unsigned long flags; spin_lock_irqsave(&portdata->in_lock, flags); while (!list_empty(q)) { urb = list_first_entry(q, struct urb, urb_list); port = urb->context; if (port->throttle_req || port->throttled) break; tty = tty_port_tty_get(&port->port); if (!tty) break; /* list_empty() will still be false after this; it means * URB is still being processed */ list_del(&urb->urb_list); spin_unlock_irqrestore(&portdata->in_lock, flags); len = urb->actual_length - portdata->n_read; data = urb->transfer_buffer + portdata->n_read; count = tty_insert_flip_string(tty, data, len); tty_flip_buffer_push(tty); tty_kref_put(tty); if (count < len) { dbg("%s: len:%d count:%d n_read:%d\n", __func__, len, count, portdata->n_read); portdata->n_read += count; port->throttled = true; /* add request back to list */ spin_lock_irqsave(&portdata->in_lock, flags); list_add(&urb->urb_list, q); 
spin_unlock_irqrestore(&portdata->in_lock, flags); return; } /* re-init list pointer to indicate we are done with it */ INIT_LIST_HEAD(&urb->urb_list); portdata->n_read = 0; intfdata = port->serial->private; spin_lock_irqsave(&intfdata->susp_lock, flags); if (!intfdata->suspended && !urb->anchor) { usb_anchor_urb(urb, &portdata->submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(urb); if (err != -EPERM) pr_err("%s: submit read urb failed:%d", __func__, err); } usb_mark_last_busy(port->serial->dev); } spin_unlock_irqrestore(&intfdata->susp_lock, flags); spin_lock_irqsave(&portdata->in_lock, flags); } spin_unlock_irqrestore(&portdata->in_lock, flags); } static void usb_wwan_indat_callback(struct urb *urb) { int err; int endpoint; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; struct usb_serial_port *port; int status = urb->status; unsigned long flags; dbg("%s: %p", __func__, urb); endpoint = usb_pipeendpoint(urb->pipe); port = urb->context; portdata = usb_get_serial_port_data(port); intfdata = port->serial->private; usb_mark_last_busy(port->serial->dev); if ((status == -ENOENT || !status) && urb->actual_length) { spin_lock_irqsave(&portdata->in_lock, flags); list_add_tail(&urb->urb_list, &portdata->in_urb_list); spin_unlock_irqrestore(&portdata->in_lock, flags); queue_work(system_nrt_wq, &portdata->in_work); return; } dbg("%s: nonzero status: %d on endpoint %02x.", __func__, status, endpoint); spin_lock(&intfdata->susp_lock); if (intfdata->suspended || !portdata->opened) { spin_unlock(&intfdata->susp_lock); return; } spin_unlock(&intfdata->susp_lock); if (status != -ESHUTDOWN) { usb_anchor_urb(urb, &portdata->submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(urb); if (err != -EPERM) pr_err("%s: submit read urb failed:%d", __func__, err); } } } static void usb_wwan_outdat_callback(struct urb *urb) { struct usb_serial_port *port; struct usb_wwan_port_private *portdata; struct 
usb_wwan_intf_private *intfdata; int i; dbg("%s", __func__); port = urb->context; intfdata = port->serial->private; usb_serial_port_softint(port); usb_autopm_put_interface_async(port->serial->interface); portdata = usb_get_serial_port_data(port); spin_lock(&intfdata->susp_lock); intfdata->in_flight--; spin_unlock(&intfdata->susp_lock); for (i = 0; i < N_OUT_URB; ++i) { if (portdata->out_urbs[i] == urb) { smp_mb__before_clear_bit(); clear_bit(i, &portdata->out_busy); break; } } } int usb_wwan_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; int i; int data_len = 0; struct urb *this_urb; portdata = usb_get_serial_port_data(port); for (i = 0; i < N_OUT_URB; i++) { this_urb = portdata->out_urbs[i]; if (this_urb && !test_bit(i, &portdata->out_busy)) data_len += OUT_BUFLEN; } dbg("%s: %d", __func__, data_len); return data_len; } EXPORT_SYMBOL(usb_wwan_write_room); int usb_wwan_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; int i; int data_len = 0; struct urb *this_urb; portdata = usb_get_serial_port_data(port); for (i = 0; i < N_OUT_URB; i++) { this_urb = portdata->out_urbs[i]; /* FIXME: This locking is insufficient as this_urb may go unused during the test */ if (this_urb && test_bit(i, &portdata->out_busy)) data_len += this_urb->transfer_buffer_length; } dbg("%s: %d", __func__, data_len); return data_len; } EXPORT_SYMBOL(usb_wwan_chars_in_buffer); void usb_wwan_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; port->throttle_req = true; dbg("%s:\n", __func__); } EXPORT_SYMBOL(usb_wwan_throttle); void usb_wwan_unthrottle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_wwan_port_private *portdata; portdata = usb_get_serial_port_data(port); dbg("%s:\n", __func__); port->throttle_req = false; port->throttled = false; queue_work(system_nrt_wq, 
&portdata->in_work); } EXPORT_SYMBOL(usb_wwan_unthrottle); int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata; struct usb_serial *serial = port->serial; int i, err; struct urb *urb; portdata = usb_get_serial_port_data(port); intfdata = serial->private; /* explicitly set the driver mode to raw */ tty->raw = 1; tty->real_raw = 1; set_bit(TTY_NO_WRITE_SPLIT, &tty->flags); dbg("%s", __func__); /* Start reading from the IN endpoint */ for (i = 0; i < N_IN_URB; i++) { urb = portdata->in_urbs[i]; if (!urb) continue; usb_anchor_urb(urb, &portdata->submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { usb_unanchor_urb(urb); dbg("%s: submit urb %d failed (%d) %d", __func__, i, err, urb->transfer_buffer_length); } } if (intfdata->send_setup) intfdata->send_setup(port); serial->interface->needs_remote_wakeup = 1; spin_lock_irq(&intfdata->susp_lock); portdata->opened = 1; spin_unlock_irq(&intfdata->susp_lock); /* this balances a get in the generic USB serial code */ usb_autopm_put_interface(serial->interface); return 0; } EXPORT_SYMBOL(usb_wwan_open); void usb_wwan_close(struct usb_serial_port *port) { int i; struct usb_serial *serial = port->serial; struct usb_wwan_port_private *portdata; struct usb_wwan_intf_private *intfdata = port->serial->private; dbg("%s", __func__); portdata = usb_get_serial_port_data(port); if (serial->dev) { /* Stop reading/writing urbs */ spin_lock_irq(&intfdata->susp_lock); portdata->opened = 0; spin_unlock_irq(&intfdata->susp_lock); for (i = 0; i < N_IN_URB; i++) usb_kill_urb(portdata->in_urbs[i]); for (i = 0; i < N_OUT_URB; i++) usb_kill_urb(portdata->out_urbs[i]); /* balancing - important as an error cannot be handled*/ usb_autopm_get_interface_no_resume(serial->interface); serial->interface->needs_remote_wakeup = 0; } } EXPORT_SYMBOL(usb_wwan_close); /* Helper functions used by usb_wwan_setup_urbs */ static struct urb 
*usb_wwan_setup_urb(struct usb_serial *serial, int endpoint, int dir, void *ctx, char *buf, int len, void (*callback) (struct urb *)) { struct urb *urb; if (endpoint == -1) return NULL; /* endpoint not needed */ urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */ if (urb == NULL) { dbg("%s: alloc for endpoint %d failed.", __func__, endpoint); return NULL; } /* Fill URB using supplied data. */ usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, endpoint) | dir, buf, len, callback, ctx); return urb; } /* Setup urbs */ static void usb_wwan_setup_urbs(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_port_private *portdata; dbg("%s", __func__); for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); /* Do indat endpoints first */ for (j = 0; j < N_IN_URB; ++j) { portdata->in_urbs[j] = usb_wwan_setup_urb(serial, port-> bulk_in_endpointAddress, USB_DIR_IN, port, portdata-> in_buffer[j], IN_BUFLEN, usb_wwan_indat_callback); } /* outdat endpoints */ for (j = 0; j < N_OUT_URB; ++j) { portdata->out_urbs[j] = usb_wwan_setup_urb(serial, port-> bulk_out_endpointAddress, USB_DIR_OUT, port, portdata-> out_buffer [j], OUT_BUFLEN, usb_wwan_outdat_callback); } } } int usb_wwan_startup(struct usb_serial *serial) { int i, j, err; struct usb_serial_port *port; struct usb_wwan_port_private *portdata; u8 *buffer; dbg("%s", __func__); /* Now setup per port private data */ for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); if (!portdata) { dbg("%s: kmalloc for usb_wwan_port_private (%d) failed!.", __func__, i); return 1; } init_usb_anchor(&portdata->delayed); init_usb_anchor(&portdata->submitted); INIT_WORK(&portdata->in_work, usb_wwan_in_work); INIT_LIST_HEAD(&portdata->in_urb_list); spin_lock_init(&portdata->in_lock); for (j = 0; j < N_IN_URB; j++) { buffer = kmalloc(IN_BUFLEN, GFP_KERNEL); if (!buffer) goto bail_out_error; 
portdata->in_buffer[j] = buffer; } for (j = 0; j < N_OUT_URB; j++) { buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); if (!buffer) goto bail_out_error2; portdata->out_buffer[j] = buffer; } usb_set_serial_port_data(port, portdata); if (!port->interrupt_in_urb) continue; err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (err) dbg("%s: submit irq_in urb failed %d", __func__, err); } usb_wwan_setup_urbs(serial); return 0; bail_out_error2: for (j = 0; j < N_OUT_URB; j++) kfree(portdata->out_buffer[j]); bail_out_error: for (j = 0; j < N_IN_URB; j++) kfree(portdata->in_buffer[j]); kfree(portdata); return 1; } EXPORT_SYMBOL(usb_wwan_startup); static void stop_read_write_urbs(struct usb_serial *serial) { int i; struct usb_serial_port *port; struct usb_wwan_port_private *portdata; /* Stop reading/writing urbs */ for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); usb_kill_anchored_urbs(&portdata->submitted); } } void usb_wwan_disconnect(struct usb_serial *serial) { dbg("%s", __func__); stop_read_write_urbs(serial); } EXPORT_SYMBOL(usb_wwan_disconnect); void usb_wwan_release(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_port_private *portdata; struct urb *urb; struct list_head *q; unsigned long flags; /* Now free them */ for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); cancel_work_sync(&portdata->in_work); /* TBD: do we really need this */ spin_lock_irqsave(&portdata->in_lock, flags); q = &portdata->in_urb_list; while (!list_empty(q)) { urb = list_first_entry(q, struct urb, urb_list); list_del_init(&urb->urb_list); } spin_unlock_irqrestore(&portdata->in_lock, flags); for (j = 0; j < N_IN_URB; j++) { usb_free_urb(portdata->in_urbs[j]); kfree(portdata->in_buffer[j]); portdata->in_urbs[j] = NULL; } for (j = 0; j < N_OUT_URB; j++) { usb_free_urb(portdata->out_urbs[j]); kfree(portdata->out_buffer[j]); portdata->out_urbs[j] 
= NULL; } } /* Now free per port private data */ for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; kfree(usb_get_serial_port_data(port)); } } EXPORT_SYMBOL(usb_wwan_release); #ifdef CONFIG_PM int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message) { struct usb_wwan_intf_private *intfdata = serial->private; int b; dbg("%s entered", __func__); if (PMSG_IS_AUTO(message)) { spin_lock_irq(&intfdata->susp_lock); b = intfdata->in_flight; spin_unlock_irq(&intfdata->susp_lock); if (b || pm_runtime_autosuspend_expiration(&serial->dev->dev)) return -EBUSY; } spin_lock_irq(&intfdata->susp_lock); intfdata->suspended = 1; spin_unlock_irq(&intfdata->susp_lock); stop_read_write_urbs(serial); return 0; } EXPORT_SYMBOL(usb_wwan_suspend); static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata) { int i; for (i = 0; i < N_OUT_URB; i++) { if (urb == portdata->out_urbs[i]) { clear_bit(i, &portdata->out_busy); break; } } } static void play_delayed(struct usb_serial_port *port) { struct usb_wwan_intf_private *data; struct usb_wwan_port_private *portdata; struct urb *urb; int err; portdata = usb_get_serial_port_data(port); data = port->serial->private; while ((urb = usb_get_from_anchor(&portdata->delayed))) { usb_anchor_urb(urb, &portdata->submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (!err) { data->in_flight++; } else { usb_unanchor_urb(urb); /* we have to throw away the rest */ do { unbusy_queued_urb(urb, portdata); usb_autopm_put_interface_no_suspend(port->serial->interface); } while ((urb = usb_get_from_anchor(&portdata->delayed))); break; } } } int usb_wwan_resume(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_intf_private *intfdata = serial->private; struct usb_wwan_port_private *portdata; struct urb *urb; int err = 0; dbg("%s entered", __func__); /* get the interrupt URBs resubmitted unconditionally */ for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; if 
(!port->interrupt_in_urb) { dbg("%s: No interrupt URB for port %d", __func__, i); continue; } err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); dbg("Submitted interrupt URB for port %d (result %d)", i, err); if (err < 0) { err("%s: Error %d for interrupt URB of port%d", __func__, err, i); goto err_out; } } spin_lock_irq(&intfdata->susp_lock); intfdata->suspended = 0; for (i = 0; i < serial->num_ports; i++) { /* walk all ports */ port = serial->port[i]; portdata = usb_get_serial_port_data(port); /* skip closed ports */ if (!portdata->opened) continue; for (j = 0; j < N_IN_URB; j++) { urb = portdata->in_urbs[j]; /* don't re-submit if it already was submitted or if * it is being processed by in_work */ if (urb->anchor || !list_empty(&urb->urb_list)) continue; usb_anchor_urb(urb, &portdata->submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { err("%s: Error %d for bulk URB[%d]:%p %d", __func__, err, j, urb, i); usb_unanchor_urb(urb); intfdata->suspended = 1; spin_unlock_irq(&intfdata->susp_lock); goto err_out; } } play_delayed(port); } spin_unlock_irq(&intfdata->susp_lock); err_out: return err; } EXPORT_SYMBOL(usb_wwan_resume); #endif MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages");
gpl-2.0
amir73il/ext4-snapshots
sound/pci/echoaudio/indigodj.c
417
2923
/* * ALSA driver for Echoaudio soundcards. * Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define INDIGO_FAMILY #define ECHOCARD_INDIGO_DJ #define ECHOCARD_NAME "Indigo DJ" #define ECHOCARD_HAS_SUPER_INTERLEAVE #define ECHOCARD_HAS_VMIXER #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32 /* Pipe indexes */ #define PX_ANALOG_OUT 0 /* 8 */ #define PX_DIGITAL_OUT 8 /* 0 */ #define PX_ANALOG_IN 8 /* 0 */ #define PX_DIGITAL_IN 8 /* 0 */ #define PX_NUM 8 /* Bus indexes */ #define BX_ANALOG_OUT 0 /* 4 */ #define BX_DIGITAL_OUT 4 /* 0 */ #define BX_ANALOG_IN 4 /* 0 */ #define BX_DIGITAL_IN 4 /* 0 */ #define BX_NUM 4 #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/asoundef.h> #include <sound/initval.h> #include <asm/io.h> #include <linux/atomic.h> #include "echoaudio.h" MODULE_FIRMWARE("ea/loader_dsp.fw"); MODULE_FIRMWARE("ea/indigo_dj_dsp.fw"); #define FW_361_LOADER 0 #define FW_INDIGO_DJ_DSP 1 static const struct firmware card_fw[] = { {0, "loader_dsp.fw"}, {0, "indigo_dj_dsp.fw"} }; static 
DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = { {0x1057, 0x3410, 0xECC0, 0x00B0, 0, 0, 0}, /* Indigo DJ*/ {0,} }; static struct snd_pcm_hardware pcm_hardware_skel = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_SYNC_START, .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE, .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000, .rate_min = 32000, .rate_max = 96000, .channels_min = 1, .channels_max = 4, .buffer_bytes_max = 262144, .period_bytes_min = 32, .period_bytes_max = 131072, .periods_min = 2, .periods_max = 220, }; #include "indigodj_dsp.c" #include "echoaudio_dsp.c" #include "echoaudio.c"
gpl-2.0
garwynn/android_kernel_lge_msm8996
drivers/input/touchscreen/wm97xx-core.c
929
22252
/* * wm97xx-core.c -- Touch screen driver core for Wolfson WM9705, WM9712 * and WM9713 AC97 Codecs. * * Copyright 2003, 2004, 2005, 2006, 2007, 2008 Wolfson Microelectronics PLC. * Author: Liam Girdwood <lrg@slimlogic.co.uk> * Parts Copyright : Ian Molton <spyro@f2s.com> * Andrew Zabolotny <zap@homelink.ru> * Russell King <rmk@arm.linux.org.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Notes: * * Features: * - supports WM9705, WM9712, WM9713 * - polling mode * - continuous mode (arch-dependent) * - adjustable rpu/dpp settings * - adjustable pressure current * - adjustable sample settle delay * - 4 and 5 wire touchscreens (5 wire is WM9712 only) * - pen down detection * - battery monitor * - sample AUX adcs * - power management * - codec GPIO * - codec event notification * Todo * - Support for async sampling control for noisy LCDs. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/proc_fs.h> #include <linux/pm.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/workqueue.h> #include <linux/wm97xx.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/slab.h> #define TS_NAME "wm97xx" #define WM_CORE_VERSION "1.00" #define DEFAULT_PRESSURE 0xb0c0 /* * Touchscreen absolute values * * These parameters are used to help the input layer discard out of * range readings and reduce jitter etc. * * o min, max:- indicate the min and max values your touch screen returns * o fuzz:- use a higher number to reduce jitter * * The default values correspond to Mainstone II in QVGA mode * * Please read * Documentation/input/input-programming.txt for more details. 
*/ static int abs_x[3] = {150, 4000, 5}; module_param_array(abs_x, int, NULL, 0); MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz"); static int abs_y[3] = {200, 4000, 40}; module_param_array(abs_y, int, NULL, 0); MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz"); static int abs_p[3] = {0, 150, 4}; module_param_array(abs_p, int, NULL, 0); MODULE_PARM_DESC(abs_p, "Touchscreen absolute Pressure min, max, fuzz"); /* * wm97xx IO access, all IO locking done by AC97 layer */ int wm97xx_reg_read(struct wm97xx *wm, u16 reg) { if (wm->ac97) return wm->ac97->bus->ops->read(wm->ac97, reg); else return -1; } EXPORT_SYMBOL_GPL(wm97xx_reg_read); void wm97xx_reg_write(struct wm97xx *wm, u16 reg, u16 val) { /* cache digitiser registers */ if (reg >= AC97_WM9713_DIG1 && reg <= AC97_WM9713_DIG3) wm->dig[(reg - AC97_WM9713_DIG1) >> 1] = val; /* cache gpio regs */ if (reg >= AC97_GPIO_CFG && reg <= AC97_MISC_AFE) wm->gpio[(reg - AC97_GPIO_CFG) >> 1] = val; /* wm9713 irq reg */ if (reg == 0x5a) wm->misc = val; if (wm->ac97) wm->ac97->bus->ops->write(wm->ac97, reg, val); } EXPORT_SYMBOL_GPL(wm97xx_reg_write); /** * wm97xx_read_aux_adc - Read the aux adc. * @wm: wm97xx device. * @adcsel: codec ADC to be read * * Reads the selected AUX ADC. 
*/ int wm97xx_read_aux_adc(struct wm97xx *wm, u16 adcsel) { int power_adc = 0, auxval; u16 power = 0; int rc = 0; int timeout = 0; /* get codec */ mutex_lock(&wm->codec_mutex); /* When the touchscreen is not in use, we may have to power up * the AUX ADC before we can use sample the AUX inputs-> */ if (wm->id == WM9713_ID2 && (power = wm97xx_reg_read(wm, AC97_EXTENDED_MID)) & 0x8000) { power_adc = 1; wm97xx_reg_write(wm, AC97_EXTENDED_MID, power & 0x7fff); } /* Prepare the codec for AUX reading */ wm->codec->aux_prepare(wm); /* Turn polling mode on to read AUX ADC */ wm->pen_probably_down = 1; while (rc != RC_VALID && timeout++ < 5) rc = wm->codec->poll_sample(wm, adcsel, &auxval); if (power_adc) wm97xx_reg_write(wm, AC97_EXTENDED_MID, power | 0x8000); wm->codec->dig_restore(wm); wm->pen_probably_down = 0; if (timeout >= 5) { dev_err(wm->dev, "timeout reading auxadc %d, disabling digitiser\n", adcsel); wm->codec->dig_enable(wm, false); } mutex_unlock(&wm->codec_mutex); return (rc == RC_VALID ? auxval & 0xfff : -EBUSY); } EXPORT_SYMBOL_GPL(wm97xx_read_aux_adc); /** * wm97xx_get_gpio - Get the status of a codec GPIO. * @wm: wm97xx device. * @gpio: gpio * * Get the status of a codec GPIO pin */ enum wm97xx_gpio_status wm97xx_get_gpio(struct wm97xx *wm, u32 gpio) { u16 status; enum wm97xx_gpio_status ret; mutex_lock(&wm->codec_mutex); status = wm97xx_reg_read(wm, AC97_GPIO_STATUS); if (status & gpio) ret = WM97XX_GPIO_HIGH; else ret = WM97XX_GPIO_LOW; mutex_unlock(&wm->codec_mutex); return ret; } EXPORT_SYMBOL_GPL(wm97xx_get_gpio); /** * wm97xx_set_gpio - Set the status of a codec GPIO. * @wm: wm97xx device. 
* @gpio: gpio * * * Set the status of a codec GPIO pin */ void wm97xx_set_gpio(struct wm97xx *wm, u32 gpio, enum wm97xx_gpio_status status) { u16 reg; mutex_lock(&wm->codec_mutex); reg = wm97xx_reg_read(wm, AC97_GPIO_STATUS); if (status == WM97XX_GPIO_HIGH) reg |= gpio; else reg &= ~gpio; if (wm->id == WM9712_ID2 && wm->variant != WM97xx_WM1613) wm97xx_reg_write(wm, AC97_GPIO_STATUS, reg << 1); else wm97xx_reg_write(wm, AC97_GPIO_STATUS, reg); mutex_unlock(&wm->codec_mutex); } EXPORT_SYMBOL_GPL(wm97xx_set_gpio); /* * Codec GPIO pin configuration, this sets pin direction, polarity, * stickyness and wake up. */ void wm97xx_config_gpio(struct wm97xx *wm, u32 gpio, enum wm97xx_gpio_dir dir, enum wm97xx_gpio_pol pol, enum wm97xx_gpio_sticky sticky, enum wm97xx_gpio_wake wake) { u16 reg; mutex_lock(&wm->codec_mutex); reg = wm97xx_reg_read(wm, AC97_GPIO_POLARITY); if (pol == WM97XX_GPIO_POL_HIGH) reg |= gpio; else reg &= ~gpio; wm97xx_reg_write(wm, AC97_GPIO_POLARITY, reg); reg = wm97xx_reg_read(wm, AC97_GPIO_STICKY); if (sticky == WM97XX_GPIO_STICKY) reg |= gpio; else reg &= ~gpio; wm97xx_reg_write(wm, AC97_GPIO_STICKY, reg); reg = wm97xx_reg_read(wm, AC97_GPIO_WAKEUP); if (wake == WM97XX_GPIO_WAKE) reg |= gpio; else reg &= ~gpio; wm97xx_reg_write(wm, AC97_GPIO_WAKEUP, reg); reg = wm97xx_reg_read(wm, AC97_GPIO_CFG); if (dir == WM97XX_GPIO_IN) reg |= gpio; else reg &= ~gpio; wm97xx_reg_write(wm, AC97_GPIO_CFG, reg); mutex_unlock(&wm->codec_mutex); } EXPORT_SYMBOL_GPL(wm97xx_config_gpio); /* * Configure the WM97XX_PRP value to use while system is suspended. * If a value other than 0 is set then WM97xx pen detection will be * left enabled in the configured mode while the system is in suspend, * the device has users and suspend has not been disabled via the * wakeup sysfs entries. 
* * @wm: WM97xx device to configure * @mode: WM97XX_PRP value to configure while suspended */ void wm97xx_set_suspend_mode(struct wm97xx *wm, u16 mode) { wm->suspend_mode = mode; device_init_wakeup(&wm->input_dev->dev, mode != 0); } EXPORT_SYMBOL_GPL(wm97xx_set_suspend_mode); /* * Handle a pen down interrupt. */ static void wm97xx_pen_irq_worker(struct work_struct *work) { struct wm97xx *wm = container_of(work, struct wm97xx, pen_event_work); int pen_was_down = wm->pen_is_down; /* do we need to enable the touch panel reader */ if (wm->id == WM9705_ID2) { if (wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD) & WM97XX_PEN_DOWN) wm->pen_is_down = 1; else wm->pen_is_down = 0; } else { u16 status, pol; mutex_lock(&wm->codec_mutex); status = wm97xx_reg_read(wm, AC97_GPIO_STATUS); pol = wm97xx_reg_read(wm, AC97_GPIO_POLARITY); if (WM97XX_GPIO_13 & pol & status) { wm->pen_is_down = 1; wm97xx_reg_write(wm, AC97_GPIO_POLARITY, pol & ~WM97XX_GPIO_13); } else { wm->pen_is_down = 0; wm97xx_reg_write(wm, AC97_GPIO_POLARITY, pol | WM97XX_GPIO_13); } if (wm->id == WM9712_ID2 && wm->variant != WM97xx_WM1613) wm97xx_reg_write(wm, AC97_GPIO_STATUS, (status & ~WM97XX_GPIO_13) << 1); else wm97xx_reg_write(wm, AC97_GPIO_STATUS, status & ~WM97XX_GPIO_13); mutex_unlock(&wm->codec_mutex); } /* If the system is not using continuous mode or it provides a * pen down operation then we need to schedule polls while the * pen is down. Otherwise the machine driver is responsible * for scheduling reads. */ if (!wm->mach_ops->acc_enabled || wm->mach_ops->acc_pen_down) { if (wm->pen_is_down && !pen_was_down) { /* Data is not available immediately on pen down */ queue_delayed_work(wm->ts_workq, &wm->ts_reader, 1); } /* Let ts_reader report the pen up for debounce. 
*/ if (!wm->pen_is_down && pen_was_down) wm->pen_is_down = 1; } if (!wm->pen_is_down && wm->mach_ops->acc_enabled) wm->mach_ops->acc_pen_up(wm); wm->mach_ops->irq_enable(wm, 1); } /* * Codec PENDOWN irq handler * * We have to disable the codec interrupt in the handler because it * can take up to 1ms to clear the interrupt source. We schedule a task * in a work queue to do the actual interaction with the chip. The * interrupt is then enabled again in the slow handler when the source * has been cleared. */ static irqreturn_t wm97xx_pen_interrupt(int irq, void *dev_id) { struct wm97xx *wm = dev_id; if (!work_pending(&wm->pen_event_work)) { wm->mach_ops->irq_enable(wm, 0); queue_work(wm->ts_workq, &wm->pen_event_work); } return IRQ_HANDLED; } /* * initialise pen IRQ handler and workqueue */ static int wm97xx_init_pen_irq(struct wm97xx *wm) { u16 reg; /* If an interrupt is supplied an IRQ enable operation must also be * provided. */ BUG_ON(!wm->mach_ops->irq_enable); if (request_irq(wm->pen_irq, wm97xx_pen_interrupt, IRQF_SHARED, "wm97xx-pen", wm)) { dev_err(wm->dev, "Failed to register pen down interrupt, polling"); wm->pen_irq = 0; return -EINVAL; } /* Configure GPIO as interrupt source on WM971x */ if (wm->id != WM9705_ID2) { BUG_ON(!wm->mach_ops->irq_gpio); reg = wm97xx_reg_read(wm, AC97_MISC_AFE); wm97xx_reg_write(wm, AC97_MISC_AFE, reg & ~(wm->mach_ops->irq_gpio)); reg = wm97xx_reg_read(wm, 0x5a); wm97xx_reg_write(wm, 0x5a, reg & ~0x0001); } return 0; } static int wm97xx_read_samples(struct wm97xx *wm) { struct wm97xx_data data; int rc; mutex_lock(&wm->codec_mutex); if (wm->mach_ops && wm->mach_ops->acc_enabled) rc = wm->mach_ops->acc_pen_down(wm); else rc = wm->codec->poll_touch(wm, &data); if (rc & RC_PENUP) { if (wm->pen_is_down) { wm->pen_is_down = 0; dev_dbg(wm->dev, "pen up\n"); input_report_abs(wm->input_dev, ABS_PRESSURE, 0); input_report_key(wm->input_dev, BTN_TOUCH, 0); input_sync(wm->input_dev); } else if (!(rc & RC_AGAIN)) { /* We need high frequency 
updates only while * pen is down, the user never will be able to * touch screen faster than a few times per * second... On the other hand, when the user * is actively working with the touchscreen we * don't want to lose the quick response. So we * will slowly increase sleep time after the * pen is up and quicky restore it to ~one task * switch when pen is down again. */ if (wm->ts_reader_interval < HZ / 10) wm->ts_reader_interval++; } } else if (rc & RC_VALID) { dev_dbg(wm->dev, "pen down: x=%x:%d, y=%x:%d, pressure=%x:%d\n", data.x >> 12, data.x & 0xfff, data.y >> 12, data.y & 0xfff, data.p >> 12, data.p & 0xfff); if (abs_x[0] > (data.x & 0xfff) || abs_x[1] < (data.x & 0xfff) || abs_y[0] > (data.y & 0xfff) || abs_y[1] < (data.y & 0xfff)) { dev_dbg(wm->dev, "Measurement out of range, dropping it\n"); rc = RC_AGAIN; goto out; } input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff); input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff); input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff); input_report_key(wm->input_dev, BTN_TOUCH, 1); input_sync(wm->input_dev); wm->pen_is_down = 1; wm->ts_reader_interval = wm->ts_reader_min_interval; } else if (rc & RC_PENDOWN) { dev_dbg(wm->dev, "pen down\n"); wm->pen_is_down = 1; wm->ts_reader_interval = wm->ts_reader_min_interval; } out: mutex_unlock(&wm->codec_mutex); return rc; } /* * The touchscreen sample reader. */ static void wm97xx_ts_reader(struct work_struct *work) { int rc; struct wm97xx *wm = container_of(work, struct wm97xx, ts_reader.work); BUG_ON(!wm->codec); do { rc = wm97xx_read_samples(wm); } while (rc & RC_AGAIN); if (wm->pen_is_down || !wm->pen_irq) queue_delayed_work(wm->ts_workq, &wm->ts_reader, wm->ts_reader_interval); } /** * wm97xx_ts_input_open - Open the touch screen input device. * @idev: Input device to be opened. * * Called by the input sub system to open a wm97xx touchscreen device. * Starts the touchscreen thread and touch digitiser. 
*/ static int wm97xx_ts_input_open(struct input_dev *idev) { struct wm97xx *wm = input_get_drvdata(idev); wm->ts_workq = create_singlethread_workqueue("kwm97xx"); if (wm->ts_workq == NULL) { dev_err(wm->dev, "Failed to create workqueue\n"); return -EINVAL; } /* start digitiser */ if (wm->mach_ops && wm->mach_ops->acc_enabled) wm->codec->acc_enable(wm, 1); wm->codec->dig_enable(wm, 1); INIT_DELAYED_WORK(&wm->ts_reader, wm97xx_ts_reader); INIT_WORK(&wm->pen_event_work, wm97xx_pen_irq_worker); wm->ts_reader_min_interval = HZ >= 100 ? HZ / 100 : 1; if (wm->ts_reader_min_interval < 1) wm->ts_reader_min_interval = 1; wm->ts_reader_interval = wm->ts_reader_min_interval; wm->pen_is_down = 0; if (wm->pen_irq) wm97xx_init_pen_irq(wm); else dev_err(wm->dev, "No IRQ specified\n"); /* If we either don't have an interrupt for pen down events or * failed to acquire it then we need to poll. */ if (wm->pen_irq == 0) queue_delayed_work(wm->ts_workq, &wm->ts_reader, wm->ts_reader_interval); return 0; } /** * wm97xx_ts_input_close - Close the touch screen input device. * @idev: Input device to be closed. * * Called by the input sub system to close a wm97xx touchscreen * device. Kills the touchscreen thread and stops the touch * digitiser. */ static void wm97xx_ts_input_close(struct input_dev *idev) { struct wm97xx *wm = input_get_drvdata(idev); u16 reg; if (wm->pen_irq) { /* Return the interrupt to GPIO usage (disabling it) */ if (wm->id != WM9705_ID2) { BUG_ON(!wm->mach_ops->irq_gpio); reg = wm97xx_reg_read(wm, AC97_MISC_AFE); wm97xx_reg_write(wm, AC97_MISC_AFE, reg | wm->mach_ops->irq_gpio); } free_irq(wm->pen_irq, wm); } wm->pen_is_down = 0; /* Balance out interrupt disables/enables */ if (cancel_work_sync(&wm->pen_event_work)) wm->mach_ops->irq_enable(wm, 1); /* ts_reader rearms itself so we need to explicitly stop it * before we destroy the workqueue. 
*/ cancel_delayed_work_sync(&wm->ts_reader); destroy_workqueue(wm->ts_workq); /* stop digitiser */ wm->codec->dig_enable(wm, 0); if (wm->mach_ops && wm->mach_ops->acc_enabled) wm->codec->acc_enable(wm, 0); } static int wm97xx_probe(struct device *dev) { struct wm97xx *wm; struct wm97xx_pdata *pdata = dev_get_platdata(dev); int ret = 0, id = 0; wm = kzalloc(sizeof(struct wm97xx), GFP_KERNEL); if (!wm) return -ENOMEM; mutex_init(&wm->codec_mutex); wm->dev = dev; dev_set_drvdata(dev, wm); wm->ac97 = to_ac97_t(dev); /* check that we have a supported codec */ id = wm97xx_reg_read(wm, AC97_VENDOR_ID1); if (id != WM97XX_ID1) { dev_err(dev, "Device with vendor %04x is not a wm97xx\n", id); ret = -ENODEV; goto alloc_err; } wm->id = wm97xx_reg_read(wm, AC97_VENDOR_ID2); wm->variant = WM97xx_GENERIC; dev_info(wm->dev, "detected a wm97%02x codec\n", wm->id & 0xff); switch (wm->id & 0xff) { #ifdef CONFIG_TOUCHSCREEN_WM9705 case 0x05: wm->codec = &wm9705_codec; break; #endif #ifdef CONFIG_TOUCHSCREEN_WM9712 case 0x12: wm->codec = &wm9712_codec; break; #endif #ifdef CONFIG_TOUCHSCREEN_WM9713 case 0x13: wm->codec = &wm9713_codec; break; #endif default: dev_err(wm->dev, "Support for wm97%02x not compiled in.\n", wm->id & 0xff); ret = -ENODEV; goto alloc_err; } /* set up physical characteristics */ wm->codec->phy_init(wm); /* load gpio cache */ wm->gpio[0] = wm97xx_reg_read(wm, AC97_GPIO_CFG); wm->gpio[1] = wm97xx_reg_read(wm, AC97_GPIO_POLARITY); wm->gpio[2] = wm97xx_reg_read(wm, AC97_GPIO_STICKY); wm->gpio[3] = wm97xx_reg_read(wm, AC97_GPIO_WAKEUP); wm->gpio[4] = wm97xx_reg_read(wm, AC97_GPIO_STATUS); wm->gpio[5] = wm97xx_reg_read(wm, AC97_MISC_AFE); wm->input_dev = input_allocate_device(); if (wm->input_dev == NULL) { ret = -ENOMEM; goto alloc_err; } /* set up touch configuration */ wm->input_dev->name = "wm97xx touchscreen"; wm->input_dev->phys = "wm97xx"; wm->input_dev->open = wm97xx_ts_input_open; wm->input_dev->close = wm97xx_ts_input_close; __set_bit(EV_ABS, 
wm->input_dev->evbit); __set_bit(EV_KEY, wm->input_dev->evbit); __set_bit(BTN_TOUCH, wm->input_dev->keybit); input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1], abs_x[2], 0); input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1], abs_y[2], 0); input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1], abs_p[2], 0); input_set_drvdata(wm->input_dev, wm); wm->input_dev->dev.parent = dev; ret = input_register_device(wm->input_dev); if (ret < 0) goto dev_alloc_err; /* register our battery device */ wm->battery_dev = platform_device_alloc("wm97xx-battery", -1); if (!wm->battery_dev) { ret = -ENOMEM; goto batt_err; } platform_set_drvdata(wm->battery_dev, wm); wm->battery_dev->dev.parent = dev; wm->battery_dev->dev.platform_data = pdata; ret = platform_device_add(wm->battery_dev); if (ret < 0) goto batt_reg_err; /* register our extended touch device (for machine specific * extensions) */ wm->touch_dev = platform_device_alloc("wm97xx-touch", -1); if (!wm->touch_dev) { ret = -ENOMEM; goto touch_err; } platform_set_drvdata(wm->touch_dev, wm); wm->touch_dev->dev.parent = dev; wm->touch_dev->dev.platform_data = pdata; ret = platform_device_add(wm->touch_dev); if (ret < 0) goto touch_reg_err; return ret; touch_reg_err: platform_device_put(wm->touch_dev); touch_err: platform_device_del(wm->battery_dev); batt_reg_err: platform_device_put(wm->battery_dev); batt_err: input_unregister_device(wm->input_dev); wm->input_dev = NULL; dev_alloc_err: input_free_device(wm->input_dev); alloc_err: kfree(wm); return ret; } static int wm97xx_remove(struct device *dev) { struct wm97xx *wm = dev_get_drvdata(dev); platform_device_unregister(wm->battery_dev); platform_device_unregister(wm->touch_dev); input_unregister_device(wm->input_dev); kfree(wm); return 0; } #ifdef CONFIG_PM static int wm97xx_suspend(struct device *dev, pm_message_t state) { struct wm97xx *wm = dev_get_drvdata(dev); u16 reg; int suspend_mode; if (device_may_wakeup(&wm->input_dev->dev)) suspend_mode 
= wm->suspend_mode; else suspend_mode = 0; if (wm->input_dev->users) cancel_delayed_work_sync(&wm->ts_reader); /* Power down the digitiser (bypassing the cache for resume) */ reg = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER2); reg &= ~WM97XX_PRP_DET_DIG; if (wm->input_dev->users) reg |= suspend_mode; wm->ac97->bus->ops->write(wm->ac97, AC97_WM97XX_DIGITISER2, reg); /* WM9713 has an additional power bit - turn it off if there * are no users or if suspend mode is zero. */ if (wm->id == WM9713_ID2 && (!wm->input_dev->users || !suspend_mode)) { reg = wm97xx_reg_read(wm, AC97_EXTENDED_MID) | 0x8000; wm97xx_reg_write(wm, AC97_EXTENDED_MID, reg); } return 0; } static int wm97xx_resume(struct device *dev) { struct wm97xx *wm = dev_get_drvdata(dev); /* restore digitiser and gpios */ if (wm->id == WM9713_ID2) { wm97xx_reg_write(wm, AC97_WM9713_DIG1, wm->dig[0]); wm97xx_reg_write(wm, 0x5a, wm->misc); if (wm->input_dev->users) { u16 reg; reg = wm97xx_reg_read(wm, AC97_EXTENDED_MID) & 0x7fff; wm97xx_reg_write(wm, AC97_EXTENDED_MID, reg); } } wm97xx_reg_write(wm, AC97_WM9713_DIG2, wm->dig[1]); wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2]); wm97xx_reg_write(wm, AC97_GPIO_CFG, wm->gpio[0]); wm97xx_reg_write(wm, AC97_GPIO_POLARITY, wm->gpio[1]); wm97xx_reg_write(wm, AC97_GPIO_STICKY, wm->gpio[2]); wm97xx_reg_write(wm, AC97_GPIO_WAKEUP, wm->gpio[3]); wm97xx_reg_write(wm, AC97_GPIO_STATUS, wm->gpio[4]); wm97xx_reg_write(wm, AC97_MISC_AFE, wm->gpio[5]); if (wm->input_dev->users && !wm->pen_irq) { wm->ts_reader_interval = wm->ts_reader_min_interval; queue_delayed_work(wm->ts_workq, &wm->ts_reader, wm->ts_reader_interval); } return 0; } #else #define wm97xx_suspend NULL #define wm97xx_resume NULL #endif /* * Machine specific operations */ int wm97xx_register_mach_ops(struct wm97xx *wm, struct wm97xx_mach_ops *mach_ops) { mutex_lock(&wm->codec_mutex); if (wm->mach_ops) { mutex_unlock(&wm->codec_mutex); return -EINVAL; } wm->mach_ops = mach_ops; mutex_unlock(&wm->codec_mutex); return 
0; } EXPORT_SYMBOL_GPL(wm97xx_register_mach_ops); void wm97xx_unregister_mach_ops(struct wm97xx *wm) { mutex_lock(&wm->codec_mutex); wm->mach_ops = NULL; mutex_unlock(&wm->codec_mutex); } EXPORT_SYMBOL_GPL(wm97xx_unregister_mach_ops); static struct device_driver wm97xx_driver = { .name = "wm97xx-ts", .bus = &ac97_bus_type, .owner = THIS_MODULE, .probe = wm97xx_probe, .remove = wm97xx_remove, .suspend = wm97xx_suspend, .resume = wm97xx_resume, }; static int __init wm97xx_init(void) { return driver_register(&wm97xx_driver); } static void __exit wm97xx_exit(void) { driver_unregister(&wm97xx_driver); } module_init(wm97xx_init); module_exit(wm97xx_exit); /* Module information */ MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>"); MODULE_DESCRIPTION("WM97xx Core - Touch Screen / AUX ADC / GPIO Driver"); MODULE_LICENSE("GPL");
gpl-2.0
rcrobles/linux-stable-4.3
drivers/cpuidle/cpuidle-calxeda.c
929
2229
/* * Copyright 2012 Calxeda, Inc. * * Based on arch/arm/plat-mxc/cpuidle.c: #v3.7 * Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2012 Linaro Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. * * Maintainer: Rob Herring <rob.herring@calxeda.com> */ #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/platform_device.h> #include <linux/psci.h> #include <asm/cpuidle.h> #include <asm/suspend.h> #include <uapi/linux/psci.h> #define CALXEDA_IDLE_PARAM \ ((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \ (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \ (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT)) static int calxeda_idle_finish(unsigned long val) { return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume)); } static int calxeda_pwrdown_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { cpu_pm_enter(); cpu_suspend(0, calxeda_idle_finish); cpu_pm_exit(); return index; } static struct cpuidle_driver calxeda_idle_driver = { .name = "calxeda_idle", .states = { ARM_CPUIDLE_WFI_STATE, { .name = "PG", .desc = "Power Gate", .exit_latency = 30, .power_usage = 50, .target_residency = 200, .enter = calxeda_pwrdown_idle, }, }, .state_count = 2, }; static int calxeda_cpuidle_probe(struct platform_device *pdev) { return cpuidle_register(&calxeda_idle_driver, NULL); } static struct platform_driver calxeda_cpuidle_plat_driver = { .driver = { .name = 
"cpuidle-calxeda", }, .probe = calxeda_cpuidle_probe, }; builtin_platform_driver(calxeda_cpuidle_plat_driver);
gpl-2.0
ziqiaozhou/cachebar
source/drivers/iio/industrialio-triggered-buffer.c
929
3435
/* * Copyright (c) 2012 Analog Devices, Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/module.h> #include <linux/iio/iio.h> #include <linux/iio/buffer.h> #include <linux/iio/kfifo_buf.h> #include <linux/iio/triggered_buffer.h> #include <linux/iio/trigger_consumer.h> static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { .postenable = &iio_triggered_buffer_postenable, .predisable = &iio_triggered_buffer_predisable, }; /** * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc * @indio_dev: IIO device structure * @pollfunc_bh: Function which will be used as pollfunc bottom half * @pollfunc_th: Function which will be used as pollfunc top half * @setup_ops: Buffer setup functions to use for this device. * If NULL the default setup functions for triggered * buffers will be used. * * This function combines some common tasks which will normally be performed * when setting up a triggered buffer. It will allocate the buffer and the * pollfunc, as well as register the buffer with the IIO core. * * Before calling this function the indio_dev structure should already be * completely initialized, but not yet registered. In practice this means that * this function should be called right before iio_device_register(). * * To free the resources allocated by this function call * iio_triggered_buffer_cleanup(). 
*/ int iio_triggered_buffer_setup(struct iio_dev *indio_dev, irqreturn_t (*pollfunc_bh)(int irq, void *p), irqreturn_t (*pollfunc_th)(int irq, void *p), const struct iio_buffer_setup_ops *setup_ops) { struct iio_buffer *buffer; int ret; buffer = iio_kfifo_allocate(indio_dev); if (!buffer) { ret = -ENOMEM; goto error_ret; } iio_device_attach_buffer(indio_dev, buffer); indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh, pollfunc_th, IRQF_ONESHOT, indio_dev, "%s_consumer%d", indio_dev->name, indio_dev->id); if (indio_dev->pollfunc == NULL) { ret = -ENOMEM; goto error_kfifo_free; } /* Ring buffer functions - here trigger setup related */ if (setup_ops) indio_dev->setup_ops = setup_ops; else indio_dev->setup_ops = &iio_triggered_buffer_setup_ops; /* Flag that polled ring buffering is possible */ indio_dev->modes |= INDIO_BUFFER_TRIGGERED; ret = iio_buffer_register(indio_dev, indio_dev->channels, indio_dev->num_channels); if (ret) goto error_dealloc_pollfunc; return 0; error_dealloc_pollfunc: iio_dealloc_pollfunc(indio_dev->pollfunc); error_kfifo_free: iio_kfifo_free(indio_dev->buffer); error_ret: return ret; } EXPORT_SYMBOL(iio_triggered_buffer_setup); /** * iio_triggered_buffer_cleanup() - Free resources allocated by iio_triggered_buffer_setup() * @indio_dev: IIO device structure */ void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev) { iio_buffer_unregister(indio_dev); iio_dealloc_pollfunc(indio_dev->pollfunc); iio_kfifo_free(indio_dev->buffer); } EXPORT_SYMBOL(iio_triggered_buffer_cleanup); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("IIO helper functions for setting up triggered buffers"); MODULE_LICENSE("GPL");
gpl-2.0
VegaDevTeam/android_kernel_pantech_ef52k
arch/arm/mach-msm/rpc_server_handset.c
1185
18297
/* arch/arm/mach-msm/rpc_server_handset.c * * Copyright (c) 2008-2010,2012 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/switch.h> #include <asm/mach-types.h> #include <mach/msm_rpcrouter.h> #include <mach/board.h> #include <mach/rpc_server_handset.h> #define DRIVER_NAME "msm-handset" #define HS_SERVER_PROG 0x30000062 #define HS_SERVER_VERS 0x00010001 #define HS_RPC_PROG 0x30000091 #define HS_PROCESS_CMD_PROC 0x02 #define HS_SUBSCRIBE_SRVC_PROC 0x03 #define HS_REPORT_EVNT_PROC 0x05 #define HS_EVENT_CB_PROC 1 #define HS_EVENT_DATA_VER 1 #define RPC_KEYPAD_NULL_PROC 0 #define RPC_KEYPAD_PASS_KEY_CODE_PROC 2 #define RPC_KEYPAD_SET_PWR_KEY_STATE_PROC 3 #define HS_PWR_K 0x6F /* Power key */ #define HS_END_K 0x51 /* End key or Power key */ #define HS_STEREO_HEADSET_K 0x82 #define HS_HEADSET_SWITCH_K 0x84 #define HS_HEADSET_SWITCH_2_K 0xF0 #define HS_HEADSET_SWITCH_3_K 0xF1 #define HS_HEADSET_HEADPHONE_K 0xF6 #define HS_HEADSET_MICROPHONE_K 0xF7 #define HS_REL_K 0xFF /* key release */ #define SW_HEADPHONE_INSERT_W_MIC 1 /* HS with mic */ #define KEY(hs_key, input_key) ((hs_key << 24) | input_key) enum hs_event { HS_EVNT_EXT_PWR = 0, /* External Power status */ HS_EVNT_HSD, /* Headset Detection */ HS_EVNT_HSTD, /* Headset Type Detection */ HS_EVNT_HSSD, /* Headset Switch Detection */ HS_EVNT_KPD, HS_EVNT_FLIP, /* Flip / Clamshell status (open/close) */ HS_EVNT_CHARGER, /* Battery is 
being charged or not */ HS_EVNT_ENV, /* Events from runtime environment like DEM */ HS_EVNT_REM, /* Events received from HS counterpart on a remote processor*/ HS_EVNT_DIAG, /* Diag Events */ HS_EVNT_LAST, /* Should always be the last event type */ HS_EVNT_MAX /* Force enum to be an 32-bit number */ }; enum hs_src_state { HS_SRC_STATE_UNKWN = 0, HS_SRC_STATE_LO, HS_SRC_STATE_HI, }; struct hs_event_data { uint32_t ver; /* Version number */ enum hs_event event_type; /* Event Type */ enum hs_event enum_disc; /* discriminator */ uint32_t data_length; /* length of the next field */ enum hs_src_state data; /* Pointer to data */ uint32_t data_size; /* Elements to be processed in data */ }; enum hs_return_value { HS_EKPDLOCKED = -2, /* Operation failed because keypad is locked */ HS_ENOTSUPPORTED = -1, /* Functionality not supported */ HS_FALSE = 0, /* Inquired condition is not true */ HS_FAILURE = 0, /* Requested operation was not successful */ HS_TRUE = 1, /* Inquired condition is true */ HS_SUCCESS = 1, /* Requested operation was successful */ HS_MAX_RETURN = 0x7FFFFFFF/* Force enum to be a 32 bit number */ }; struct hs_key_data { uint32_t ver; /* Version number to track sturcture changes */ uint32_t code; /* which key? */ uint32_t parm; /* key status. 
Up/down or pressed/released */ }; enum hs_subs_srvc { HS_SUBS_SEND_CMD = 0, /* Subscribe to send commands to HS */ HS_SUBS_RCV_EVNT, /* Subscribe to receive Events from HS */ HS_SUBS_SRVC_MAX }; enum hs_subs_req { HS_SUBS_REGISTER, /* Subscribe */ HS_SUBS_CANCEL, /* Unsubscribe */ HS_SUB_STATUS_MAX }; enum hs_event_class { HS_EVNT_CLASS_ALL = 0, /* All HS events */ HS_EVNT_CLASS_LAST, /* Should always be the last class type */ HS_EVNT_CLASS_MAX }; enum hs_cmd_class { HS_CMD_CLASS_LCD = 0, /* Send LCD related commands */ HS_CMD_CLASS_KPD, /* Send KPD related commands */ HS_CMD_CLASS_LAST, /* Should always be the last class type */ HS_CMD_CLASS_MAX }; /* * Receive events or send command */ union hs_subs_class { enum hs_event_class evnt; enum hs_cmd_class cmd; }; struct hs_subs { uint32_t ver; enum hs_subs_srvc srvc; /* commands or events */ enum hs_subs_req req; /* subscribe or unsubscribe */ uint32_t host_os; enum hs_subs_req disc; /* discriminator */ union hs_subs_class id; }; struct hs_event_cb_recv { uint32_t cb_id; uint32_t hs_key_data_ptr; struct hs_key_data key; }; enum hs_ext_cmd_type { HS_EXT_CMD_KPD_SEND_KEY = 0, /* Send Key */ HS_EXT_CMD_KPD_BKLT_CTRL, /* Keypad backlight intensity */ HS_EXT_CMD_LCD_BKLT_CTRL, /* LCD Backlight intensity */ HS_EXT_CMD_DIAG_KEYMAP, /* Emulating a Diag key sequence */ HS_EXT_CMD_DIAG_LOCK, /* Device Lock/Unlock */ HS_EXT_CMD_GET_EVNT_STATUS, /* Get the status for one of the drivers */ HS_EXT_CMD_KPD_GET_KEYS_STATUS,/* Get a list of keys status */ HS_EXT_CMD_KPD_SET_PWR_KEY_RST_THOLD, /* PWR Key HW Reset duration */ HS_EXT_CMD_KPD_SET_PWR_KEY_THOLD, /* Set pwr key threshold duration */ HS_EXT_CMD_LAST, /* Should always be the last command type */ HS_EXT_CMD_MAX = 0x7FFFFFFF /* Force enum to be an 32-bit number */ }; struct hs_cmd_data_type { uint32_t hs_cmd_data_type_ptr; /* hs_cmd_data_type ptr length */ uint32_t ver; /* version */ enum hs_ext_cmd_type id; /* command id */ uint32_t handle; /* handle returned from subscribe 
proc */ enum hs_ext_cmd_type disc_id1; /* discriminator id */ uint32_t input_ptr; /* input ptr length */ uint32_t input_val; /* command specific data */ uint32_t input_len; /* length of command input */ enum hs_ext_cmd_type disc_id2; /* discriminator id */ uint32_t output_len; /* length of output data */ uint32_t delayed; /* execution context for modem true - caller context false - hs task context*/ }; static const uint32_t hs_key_map[] = { KEY(HS_PWR_K, KEY_POWER), KEY(HS_END_K, KEY_END), KEY(HS_STEREO_HEADSET_K, SW_HEADPHONE_INSERT_W_MIC), KEY(HS_HEADSET_HEADPHONE_K, SW_HEADPHONE_INSERT), KEY(HS_HEADSET_MICROPHONE_K, SW_MICROPHONE_INSERT), KEY(HS_HEADSET_SWITCH_K, KEY_MEDIA), KEY(HS_HEADSET_SWITCH_2_K, KEY_VOLUMEUP), KEY(HS_HEADSET_SWITCH_3_K, KEY_VOLUMEDOWN), 0 }; enum { NO_DEVICE = 0, MSM_HEADSET = 1, }; /* Add newer versions at the top of array */ static const unsigned int rpc_vers[] = { 0x00030001, 0x00020001, 0x00010001, }; /* hs subscription request parameters */ struct hs_subs_rpc_req { uint32_t hs_subs_ptr; struct hs_subs hs_subs; uint32_t hs_cb_id; uint32_t hs_handle_ptr; uint32_t hs_handle_data; }; static struct hs_subs_rpc_req *hs_subs_req; struct msm_handset { struct input_dev *ipdev; struct switch_dev sdev; struct msm_handset_platform_data *hs_pdata; bool mic_on, hs_on; }; static struct msm_rpc_client *rpc_client; static struct msm_handset *hs; static int hs_find_key(uint32_t hscode) { int i, key; key = KEY(hscode, 0); for (i = 0; hs_key_map[i] != 0; i++) { if ((hs_key_map[i] & 0xff000000) == key) return hs_key_map[i] & 0x00ffffff; } return -1; } static void update_state(void) { int state; if (hs->mic_on && hs->hs_on) state = 1 << 0; else if (hs->hs_on) state = 1 << 1; else if (hs->mic_on) state = 1 << 2; else state = 0; switch_set_state(&hs->sdev, state); } /* * tuple format: (key_code, key_param) * * old-architecture: * key-press = (key_code, 0) * key-release = (0xff, key_code) * * new-architecutre: * key-press = (key_code, 0) * key-release = 
(key_code, 0xff) */ static void report_hs_key(uint32_t key_code, uint32_t key_parm) { int key, temp_key_code; if (key_code == HS_REL_K) key = hs_find_key(key_parm); else key = hs_find_key(key_code); temp_key_code = key_code; if (key_parm == HS_REL_K) key_code = key_parm; switch (key) { case KEY_POWER: case KEY_END: case KEY_MEDIA: case KEY_VOLUMEUP: case KEY_VOLUMEDOWN: input_report_key(hs->ipdev, key, (key_code != HS_REL_K)); break; case SW_HEADPHONE_INSERT_W_MIC: hs->mic_on = hs->hs_on = (key_code != HS_REL_K) ? 1 : 0; input_report_switch(hs->ipdev, SW_HEADPHONE_INSERT, hs->hs_on); input_report_switch(hs->ipdev, SW_MICROPHONE_INSERT, hs->mic_on); update_state(); break; case SW_HEADPHONE_INSERT: hs->hs_on = (key_code != HS_REL_K) ? 1 : 0; input_report_switch(hs->ipdev, key, hs->hs_on); update_state(); break; case SW_MICROPHONE_INSERT: hs->mic_on = (key_code != HS_REL_K) ? 1 : 0; input_report_switch(hs->ipdev, key, hs->mic_on); update_state(); break; case -1: printk(KERN_ERR "%s: No mapping for remote handset event %d\n", __func__, temp_key_code); return; } input_sync(hs->ipdev); } static int handle_hs_rpc_call(struct msm_rpc_server *server, struct rpc_request_hdr *req, unsigned len) { struct rpc_keypad_pass_key_code_args { uint32_t key_code; uint32_t key_parm; }; switch (req->procedure) { case RPC_KEYPAD_NULL_PROC: return 0; case RPC_KEYPAD_PASS_KEY_CODE_PROC: { struct rpc_keypad_pass_key_code_args *args; args = (struct rpc_keypad_pass_key_code_args *)(req + 1); args->key_code = be32_to_cpu(args->key_code); args->key_parm = be32_to_cpu(args->key_parm); report_hs_key(args->key_code, args->key_parm); return 0; } case RPC_KEYPAD_SET_PWR_KEY_STATE_PROC: /* This RPC function must be available for the ARM9 * to function properly. This function is redundant * when RPC_KEYPAD_PASS_KEY_CODE_PROC is handled. So * input_report_key is not needed. 
*/ return 0; default: return -ENODEV; } } static struct msm_rpc_server hs_rpc_server = { .prog = HS_SERVER_PROG, .vers = HS_SERVER_VERS, .rpc_call = handle_hs_rpc_call, }; static int process_subs_srvc_callback(struct hs_event_cb_recv *recv) { if (!recv) return -ENODATA; report_hs_key(be32_to_cpu(recv->key.code), be32_to_cpu(recv->key.parm)); return 0; } static void process_hs_rpc_request(uint32_t proc, void *data) { if (proc == HS_EVENT_CB_PROC) process_subs_srvc_callback(data); else pr_err("%s: unknown rpc proc %d\n", __func__, proc); } static int hs_rpc_report_event_arg(struct msm_rpc_client *client, void *buffer, void *data) { struct hs_event_rpc_req { uint32_t hs_event_data_ptr; struct hs_event_data data; }; struct hs_event_rpc_req *req = buffer; req->hs_event_data_ptr = cpu_to_be32(0x1); req->data.ver = cpu_to_be32(HS_EVENT_DATA_VER); req->data.event_type = cpu_to_be32(HS_EVNT_HSD); req->data.enum_disc = cpu_to_be32(HS_EVNT_HSD); req->data.data_length = cpu_to_be32(0x1); req->data.data = cpu_to_be32(*(enum hs_src_state *)data); req->data.data_size = cpu_to_be32(sizeof(enum hs_src_state)); return sizeof(*req); } static int hs_rpc_report_event_res(struct msm_rpc_client *client, void *buffer, void *data) { enum hs_return_value result; result = be32_to_cpu(*(enum hs_return_value *)buffer); pr_debug("%s: request completed: 0x%x\n", __func__, result); if (result == HS_SUCCESS) return 0; return 1; } void report_headset_status(bool connected) { int rc = -1; enum hs_src_state status; if (connected == true) status = HS_SRC_STATE_HI; else status = HS_SRC_STATE_LO; rc = msm_rpc_client_req(rpc_client, HS_REPORT_EVNT_PROC, hs_rpc_report_event_arg, &status, hs_rpc_report_event_res, NULL, -1); if (rc) pr_err("%s: couldn't send rpc client request\n", __func__); } EXPORT_SYMBOL(report_headset_status); static int hs_rpc_pwr_cmd_arg(struct msm_rpc_client *client, void *buffer, void *data) { struct hs_cmd_data_type *hs_pwr_cmd = buffer; hs_pwr_cmd->hs_cmd_data_type_ptr = 
cpu_to_be32(0x01); hs_pwr_cmd->ver = cpu_to_be32(0x03); hs_pwr_cmd->id = cpu_to_be32(HS_EXT_CMD_KPD_SET_PWR_KEY_THOLD); hs_pwr_cmd->handle = cpu_to_be32(hs_subs_req->hs_handle_data); hs_pwr_cmd->disc_id1 = cpu_to_be32(HS_EXT_CMD_KPD_SET_PWR_KEY_THOLD); hs_pwr_cmd->input_ptr = cpu_to_be32(0x01); hs_pwr_cmd->input_val = cpu_to_be32(hs->hs_pdata->pwr_key_delay_ms); hs_pwr_cmd->input_len = cpu_to_be32(0x01); hs_pwr_cmd->disc_id2 = cpu_to_be32(HS_EXT_CMD_KPD_SET_PWR_KEY_THOLD); hs_pwr_cmd->output_len = cpu_to_be32(0x00); hs_pwr_cmd->delayed = cpu_to_be32(0x00); return sizeof(*hs_pwr_cmd); } static int hs_rpc_pwr_cmd_res(struct msm_rpc_client *client, void *buffer, void *data) { uint32_t result; result = be32_to_cpu(*((uint32_t *)buffer)); pr_debug("%s: request completed: 0x%x\n", __func__, result); return 0; } static int hs_rpc_register_subs_arg(struct msm_rpc_client *client, void *buffer, void *data) { hs_subs_req = buffer; hs_subs_req->hs_subs_ptr = cpu_to_be32(0x1); hs_subs_req->hs_subs.ver = cpu_to_be32(0x1); hs_subs_req->hs_subs.srvc = cpu_to_be32(HS_SUBS_RCV_EVNT); hs_subs_req->hs_subs.req = cpu_to_be32(HS_SUBS_REGISTER); hs_subs_req->hs_subs.host_os = cpu_to_be32(0x4); /* linux */ hs_subs_req->hs_subs.disc = cpu_to_be32(HS_SUBS_RCV_EVNT); hs_subs_req->hs_subs.id.evnt = cpu_to_be32(HS_EVNT_CLASS_ALL); hs_subs_req->hs_cb_id = cpu_to_be32(0x1); hs_subs_req->hs_handle_ptr = cpu_to_be32(0x1); hs_subs_req->hs_handle_data = cpu_to_be32(0x0); return sizeof(*hs_subs_req); } static int hs_rpc_register_subs_res(struct msm_rpc_client *client, void *buffer, void *data) { uint32_t result; result = be32_to_cpu(*((uint32_t *)buffer)); pr_debug("%s: request completed: 0x%x\n", __func__, result); return 0; } static int hs_cb_func(struct msm_rpc_client *client, void *buffer, int in_size) { int rc = -1; struct rpc_request_hdr *hdr = buffer; hdr->type = be32_to_cpu(hdr->type); hdr->xid = be32_to_cpu(hdr->xid); hdr->rpc_vers = be32_to_cpu(hdr->rpc_vers); hdr->prog = 
be32_to_cpu(hdr->prog); hdr->vers = be32_to_cpu(hdr->vers); hdr->procedure = be32_to_cpu(hdr->procedure); process_hs_rpc_request(hdr->procedure, (void *) (hdr + 1)); msm_rpc_start_accepted_reply(client, hdr->xid, RPC_ACCEPTSTAT_SUCCESS); rc = msm_rpc_send_accepted_reply(client, 0); if (rc) { pr_err("%s: sending reply failed: %d\n", __func__, rc); return rc; } return 0; } static int __devinit hs_rpc_cb_init(void) { int rc = 0, i, num_vers; num_vers = ARRAY_SIZE(rpc_vers); for (i = 0; i < num_vers; i++) { rpc_client = msm_rpc_register_client("hs", HS_RPC_PROG, rpc_vers[i], 0, hs_cb_func); if (IS_ERR(rpc_client)) pr_debug("%s: RPC Client version %d failed, fallback\n", __func__, rpc_vers[i]); else break; } if (IS_ERR(rpc_client)) { pr_err("%s: Incompatible RPC version error %ld\n", __func__, PTR_ERR(rpc_client)); return PTR_ERR(rpc_client); } rc = msm_rpc_client_req(rpc_client, HS_SUBSCRIBE_SRVC_PROC, hs_rpc_register_subs_arg, NULL, hs_rpc_register_subs_res, NULL, -1); if (rc) { pr_err("%s: RPC client request failed for subscribe services\n", __func__); goto err_client_req; } rc = msm_rpc_client_req(rpc_client, HS_PROCESS_CMD_PROC, hs_rpc_pwr_cmd_arg, NULL, hs_rpc_pwr_cmd_res, NULL, -1); if (rc) pr_err("%s: RPC client request failed for pwr key" " delay cmd, using normal mode\n", __func__); return 0; err_client_req: msm_rpc_unregister_client(rpc_client); return rc; } static int __devinit hs_rpc_init(void) { int rc; rc = hs_rpc_cb_init(); if (rc) { pr_err("%s: failed to initialize rpc client, try server...\n", __func__); rc = msm_rpc_create_server(&hs_rpc_server); if (rc) { pr_err("%s: failed to create rpc server\n", __func__); return rc; } } return rc; } static void __devexit hs_rpc_deinit(void) { if (rpc_client) msm_rpc_unregister_client(rpc_client); } static ssize_t msm_headset_print_name(struct switch_dev *sdev, char *buf) { switch (switch_get_state(&hs->sdev)) { case NO_DEVICE: return sprintf(buf, "No Device\n"); case MSM_HEADSET: return sprintf(buf, "Headset\n"); 
} return -EINVAL; } static int __devinit hs_probe(struct platform_device *pdev) { int rc = 0; struct input_dev *ipdev; hs = kzalloc(sizeof(struct msm_handset), GFP_KERNEL); if (!hs) return -ENOMEM; hs->sdev.name = "h2w"; hs->sdev.print_name = msm_headset_print_name; rc = switch_dev_register(&hs->sdev); if (rc) goto err_switch_dev_register; ipdev = input_allocate_device(); if (!ipdev) { rc = -ENOMEM; goto err_alloc_input_dev; } input_set_drvdata(ipdev, hs); hs->ipdev = ipdev; if (pdev->dev.platform_data) hs->hs_pdata = pdev->dev.platform_data; if (hs->hs_pdata->hs_name) ipdev->name = hs->hs_pdata->hs_name; else ipdev->name = DRIVER_NAME; ipdev->id.vendor = 0x0001; ipdev->id.product = 1; ipdev->id.version = 1; input_set_capability(ipdev, EV_KEY, KEY_MEDIA); input_set_capability(ipdev, EV_KEY, KEY_VOLUMEUP); input_set_capability(ipdev, EV_KEY, KEY_VOLUMEDOWN); input_set_capability(ipdev, EV_SW, SW_HEADPHONE_INSERT); input_set_capability(ipdev, EV_SW, SW_MICROPHONE_INSERT); input_set_capability(ipdev, EV_KEY, KEY_POWER); input_set_capability(ipdev, EV_KEY, KEY_END); rc = input_register_device(ipdev); if (rc) { dev_err(&ipdev->dev, "hs_probe: input_register_device rc=%d\n", rc); goto err_reg_input_dev; } platform_set_drvdata(pdev, hs); rc = hs_rpc_init(); if (rc) { dev_err(&ipdev->dev, "rpc init failure\n"); goto err_hs_rpc_init; } return 0; err_hs_rpc_init: input_unregister_device(ipdev); ipdev = NULL; err_reg_input_dev: input_free_device(ipdev); err_alloc_input_dev: switch_dev_unregister(&hs->sdev); err_switch_dev_register: kfree(hs); return rc; } static int __devexit hs_remove(struct platform_device *pdev) { struct msm_handset *hs = platform_get_drvdata(pdev); input_unregister_device(hs->ipdev); switch_dev_unregister(&hs->sdev); kfree(hs); hs_rpc_deinit(); return 0; } static struct platform_driver hs_driver = { .probe = hs_probe, .remove = __devexit_p(hs_remove), .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init hs_init(void) { return 
platform_driver_register(&hs_driver); } late_initcall(hs_init); static void __exit hs_exit(void) { platform_driver_unregister(&hs_driver); } module_exit(hs_exit); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:msm-handset");
gpl-2.0
bedwa/P6800-Kernel
arch/x86/pci/direct.c
1185
6591
/* * direct.c - Low-level direct PCI config space access */ #include <linux/pci.h> #include <linux/init.h> #include <linux/dmi.h> #include <asm/pci_x86.h> /* * Functions for accessing PCI base (first 256 bytes) and extended * (4096 bytes per PCI function) configuration space with type 1 * accesses. */ #define PCI_CONF1_ADDRESS(bus, devfn, reg) \ (0x80000000 | ((reg & 0xF00) << 16) | (bus << 16) \ | (devfn << 8) | (reg & 0xFC)) static int pci_conf1_read(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 *value) { unsigned long flags; if ((bus > 255) || (devfn > 255) || (reg > 4095)) { *value = -1; return -EINVAL; } raw_spin_lock_irqsave(&pci_config_lock, flags); outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8); switch (len) { case 1: *value = inb(0xCFC + (reg & 3)); break; case 2: *value = inw(0xCFC + (reg & 2)); break; case 4: *value = inl(0xCFC); break; } raw_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } static int pci_conf1_write(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 value) { unsigned long flags; if ((bus > 255) || (devfn > 255) || (reg > 4095)) return -EINVAL; raw_spin_lock_irqsave(&pci_config_lock, flags); outl(PCI_CONF1_ADDRESS(bus, devfn, reg), 0xCF8); switch (len) { case 1: outb((u8)value, 0xCFC + (reg & 3)); break; case 2: outw((u16)value, 0xCFC + (reg & 2)); break; case 4: outl((u32)value, 0xCFC); break; } raw_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } #undef PCI_CONF1_ADDRESS struct pci_raw_ops pci_direct_conf1 = { .read = pci_conf1_read, .write = pci_conf1_write, }; /* * Functions for accessing PCI configuration space with type 2 accesses */ #define PCI_CONF2_ADDRESS(dev, reg) (u16)(0xC000 | (dev << 8) | reg) static int pci_conf2_read(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 *value) { unsigned long flags; int dev, fn; if ((bus > 255) || (devfn > 255) || (reg > 255)) { *value = -1; return -EINVAL; } dev = PCI_SLOT(devfn); 
fn = PCI_FUNC(devfn); if (dev & 0x10) return PCIBIOS_DEVICE_NOT_FOUND; raw_spin_lock_irqsave(&pci_config_lock, flags); outb((u8)(0xF0 | (fn << 1)), 0xCF8); outb((u8)bus, 0xCFA); switch (len) { case 1: *value = inb(PCI_CONF2_ADDRESS(dev, reg)); break; case 2: *value = inw(PCI_CONF2_ADDRESS(dev, reg)); break; case 4: *value = inl(PCI_CONF2_ADDRESS(dev, reg)); break; } outb(0, 0xCF8); raw_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } static int pci_conf2_write(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 value) { unsigned long flags; int dev, fn; if ((bus > 255) || (devfn > 255) || (reg > 255)) return -EINVAL; dev = PCI_SLOT(devfn); fn = PCI_FUNC(devfn); if (dev & 0x10) return PCIBIOS_DEVICE_NOT_FOUND; raw_spin_lock_irqsave(&pci_config_lock, flags); outb((u8)(0xF0 | (fn << 1)), 0xCF8); outb((u8)bus, 0xCFA); switch (len) { case 1: outb((u8)value, PCI_CONF2_ADDRESS(dev, reg)); break; case 2: outw((u16)value, PCI_CONF2_ADDRESS(dev, reg)); break; case 4: outl((u32)value, PCI_CONF2_ADDRESS(dev, reg)); break; } outb(0, 0xCF8); raw_spin_unlock_irqrestore(&pci_config_lock, flags); return 0; } #undef PCI_CONF2_ADDRESS struct pci_raw_ops pci_direct_conf2 = { .read = pci_conf2_read, .write = pci_conf2_write, }; /* * Before we decide to use direct hardware access mechanisms, we try to do some * trivial checks to ensure it at least _seems_ to be working -- we just test * whether bus 00 contains a host bridge (this is similar to checking * techniques used in XFree86, but ours should be more reliable since we * attempt to make use of direct access hints provided by the PCI BIOS). * * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. */ static int __init pci_sanity_check(struct pci_raw_ops *o) { u32 x = 0; int year, devfn; if (pci_probe & PCI_NO_CHECKS) return 1; /* Assume Type 1 works for newer systems. 
This handles machines that don't have anything on PCI Bus 0. */ dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL); if (year >= 2001) return 1; for (devfn = 0; devfn < 0x100; devfn++) { if (o->read(0, 0, devfn, PCI_CLASS_DEVICE, 2, &x)) continue; if (x == PCI_CLASS_BRIDGE_HOST || x == PCI_CLASS_DISPLAY_VGA) return 1; if (o->read(0, 0, devfn, PCI_VENDOR_ID, 2, &x)) continue; if (x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ) return 1; } DBG(KERN_WARNING "PCI: Sanity check failed\n"); return 0; } static int __init pci_check_type1(void) { unsigned long flags; unsigned int tmp; int works = 0; local_irq_save(flags); outb(0x01, 0xCFB); tmp = inl(0xCF8); outl(0x80000000, 0xCF8); if (inl(0xCF8) == 0x80000000 && pci_sanity_check(&pci_direct_conf1)) { works = 1; } outl(tmp, 0xCF8); local_irq_restore(flags); return works; } static int __init pci_check_type2(void) { unsigned long flags; int works = 0; local_irq_save(flags); outb(0x00, 0xCFB); outb(0x00, 0xCF8); outb(0x00, 0xCFA); if (inb(0xCF8) == 0x00 && inb(0xCFA) == 0x00 && pci_sanity_check(&pci_direct_conf2)) { works = 1; } local_irq_restore(flags); return works; } void __init pci_direct_init(int type) { if (type == 0) return; printk(KERN_INFO "PCI: Using configuration type %d for base access\n", type); if (type == 1) { raw_pci_ops = &pci_direct_conf1; if (raw_pci_ext_ops) return; if (!(pci_probe & PCI_HAS_IO_ECS)) return; printk(KERN_INFO "PCI: Using configuration type 1 " "for extended access\n"); raw_pci_ext_ops = &pci_direct_conf1; return; } raw_pci_ops = &pci_direct_conf2; } int __init pci_direct_probe(void) { struct resource *region, *region2; if ((pci_probe & PCI_PROBE_CONF1) == 0) goto type2; region = request_region(0xCF8, 8, "PCI conf1"); if (!region) goto type2; if (pci_check_type1()) { raw_pci_ops = &pci_direct_conf1; port_cf9_safe = true; return 1; } release_resource(region); type2: if ((pci_probe & PCI_PROBE_CONF2) == 0) return 0; region = request_region(0xCF8, 4, "PCI conf2"); if (!region) return 0; 
region2 = request_region(0xC000, 0x1000, "PCI conf2"); if (!region2) goto fail2; if (pci_check_type2()) { raw_pci_ops = &pci_direct_conf2; port_cf9_safe = true; return 2; } release_resource(region2); fail2: release_resource(region); return 0; }
gpl-2.0
drikinukoda/android_kernel_lge_e8lte
drivers/bluetooth/btusb.c
1953
29259
/* * * Generic Bluetooth USB driver * * Copyright (C) 2005-2008 Marcel Holtmann <marcel@holtmann.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "0.6" static int ignore_dga; static int ignore_csr; static int ignore_sniffer; static int disable_scofix; static int force_scofix; static int reset = 1; static struct usb_driver btusb_driver; #define BTUSB_IGNORE 0x01 #define BTUSB_DIGIANSWER 0x02 #define BTUSB_CSR 0x04 #define BTUSB_SNIFFER 0x08 #define BTUSB_BCM92035 0x10 #define BTUSB_BROKEN_ISOC 0x20 #define BTUSB_WRONG_SCO_MTU 0x40 #define BTUSB_ATH3012 0x80 static struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, /* Broadcom SoftSailing reporting vendor specific */ { USB_DEVICE(0x05ac, 0x21e1) }, /* Apple MacBookPro 7,1 */ { USB_DEVICE(0x05ac, 0x8213) }, /* Apple iMac11,1 */ { USB_DEVICE(0x05ac, 0x8215) }, /* Apple MacBookPro6,2 */ { USB_DEVICE(0x05ac, 0x8218) }, /* Apple MacBookAir3,1, MacBookAir3,2 */ { USB_DEVICE(0x05ac, 0x821b) }, /* Apple 
MacBookAir4,1 */ { USB_DEVICE(0x05ac, 0x821f) }, /* Apple MacBookPro8,2 */ { USB_DEVICE(0x05ac, 0x821a) }, /* Apple MacMini5,1 */ { USB_DEVICE(0x05ac, 0x8281) }, /* AVM BlueFRITZ! USB v2.0 */ { USB_DEVICE(0x057c, 0x3800) }, /* Bluetooth Ultraport Module from IBM */ { USB_DEVICE(0x04bf, 0x030a) }, /* ALPS Modules with non-standard id */ { USB_DEVICE(0x044e, 0x3001) }, { USB_DEVICE(0x044e, 0x3002) }, /* Ericsson with non-standard id */ { USB_DEVICE(0x0bdb, 0x1002) }, /* Canyon CN-BTU1 with HID interfaces */ { USB_DEVICE(0x0c10, 0x0000) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, btusb_table); static struct usb_device_id blacklist_table[] = { /* CSR BlueCore devices */ { USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR }, /* Broadcom BCM2033 without firmware */ { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, /* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, /* Atheros 3012 with sflash firmware */ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, /* Broadcom BCM2045 */ { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_WRONG_SCO_MTU }, /* IBM/Lenovo ThinkPad with Broadcom chip */ { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_WRONG_SCO_MTU }, /* HP laptop with Broadcom chip */ { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Dell laptop with Broadcom chip */ { USB_DEVICE(0x413c, 0x8126), 
.driver_info = BTUSB_WRONG_SCO_MTU }, /* Dell Wireless 370 and 410 devices */ { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Belkin F8T012 and F8T013 devices */ { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Asus WL-BTD202 device */ { USB_DEVICE(0x0b05, 0x1715), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Kensington Bluetooth USB adapter */ { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_WRONG_SCO_MTU }, /* RTX Telecom based adapters with buggy SCO support */ { USB_DEVICE(0x0400, 0x0807), .driver_info = BTUSB_BROKEN_ISOC }, { USB_DEVICE(0x0400, 0x080a), .driver_info = BTUSB_BROKEN_ISOC }, /* CONWISE Technology based adapters with buggy SCO support */ { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC }, /* Digianswer devices */ { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER }, { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE }, /* CSR BlueCore Bluetooth Sniffer */ { USB_DEVICE(0x0a12, 0x0002), .driver_info = BTUSB_SNIFFER }, /* Frontline ComProbe Bluetooth Sniffer */ { USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER }, { } /* Terminating entry */ }; #define BTUSB_MAX_ISOC_FRAMES 10 #define BTUSB_INTR_RUNNING 0 #define BTUSB_BULK_RUNNING 1 #define BTUSB_ISOC_RUNNING 2 #define BTUSB_SUSPENDING 3 #define BTUSB_DID_ISO_RESUME 4 struct btusb_data { struct hci_dev *hdev; struct usb_device *udev; struct usb_interface *intf; struct usb_interface *isoc; spinlock_t lock; unsigned long flags; struct work_struct work; struct work_struct waker; struct usb_anchor tx_anchor; struct usb_anchor intr_anchor; struct usb_anchor bulk_anchor; struct usb_anchor isoc_anchor; struct usb_anchor deferred; int tx_in_flight; spinlock_t txlock; struct usb_endpoint_descriptor *intr_ep; struct usb_endpoint_descriptor *bulk_tx_ep; struct usb_endpoint_descriptor 
*bulk_rx_ep; struct usb_endpoint_descriptor *isoc_tx_ep; struct usb_endpoint_descriptor *isoc_rx_ep; __u8 cmdreq_type; unsigned int sco_num; int isoc_altsetting; int suspend_count; }; static int inc_tx(struct btusb_data *data) { unsigned long flags; int rv; spin_lock_irqsave(&data->txlock, flags); rv = test_bit(BTUSB_SUSPENDING, &data->flags); if (!rv) data->tx_in_flight++; spin_unlock_irqrestore(&data->txlock, flags); return rv; } static void btusb_intr_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hdev->driver_data; int err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { hdev->stat.byte_rx += urb->actual_length; if (hci_recv_fragment(hdev, HCI_EVENT_PKT, urb->transfer_buffer, urb->actual_length) < 0) { BT_ERR("%s corrupted event packet", hdev->name); hdev->stat.err_rx++; } } if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) return; usb_mark_last_busy(data->udev); usb_anchor_urb(urb, &data->intr_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { if (err != -EPERM) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } } static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) { struct btusb_data *data = hdev->driver_data; struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size; BT_DBG("%s", hdev->name); if (!data->intr_ep) return -ENODEV; urb = usb_alloc_urb(0, mem_flags); if (!urb) return -ENOMEM; size = le16_to_cpu(data->intr_ep->wMaxPacketSize); buf = kmalloc(size, mem_flags); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress); usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_intr_complete, hdev, data->intr_ep->bInterval); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &data->intr_anchor); err = usb_submit_urb(urb, mem_flags); if (err < 0) { 
BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static void btusb_bulk_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hdev->driver_data; int err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { hdev->stat.byte_rx += urb->actual_length; if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, urb->transfer_buffer, urb->actual_length) < 0) { BT_ERR("%s corrupted ACL packet", hdev->name); hdev->stat.err_rx++; } } if (!test_bit(BTUSB_BULK_RUNNING, &data->flags)) return; usb_anchor_urb(urb, &data->bulk_anchor); usb_mark_last_busy(data->udev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { if (err != -EPERM) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } } static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) { struct btusb_data *data = hdev->driver_data; struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = HCI_MAX_FRAME_SIZE; BT_DBG("%s", hdev->name); if (!data->bulk_rx_ep) return -ENODEV; urb = usb_alloc_urb(0, mem_flags); if (!urb) return -ENOMEM; buf = kmalloc(size, mem_flags); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, btusb_bulk_complete, hdev); urb->transfer_flags |= URB_FREE_BUFFER; usb_mark_last_busy(data->udev); usb_anchor_urb(urb, &data->bulk_anchor); err = usb_submit_urb(urb, mem_flags); if (err < 0) { BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static void btusb_isoc_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hdev->driver_data; int i, err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, 
urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { for (i = 0; i < urb->number_of_packets; i++) { unsigned int offset = urb->iso_frame_desc[i].offset; unsigned int length = urb->iso_frame_desc[i].actual_length; if (urb->iso_frame_desc[i].status) continue; hdev->stat.byte_rx += length; if (hci_recv_fragment(hdev, HCI_SCODATA_PKT, urb->transfer_buffer + offset, length) < 0) { BT_ERR("%s corrupted SCO packet", hdev->name); hdev->stat.err_rx++; } } } if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) return; usb_anchor_urb(urb, &data->isoc_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { if (err != -EPERM) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } } static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu) { int i, offset = 0; BT_DBG("len %d mtu %d", len, mtu); for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu; i++, offset += mtu, len -= mtu) { urb->iso_frame_desc[i].offset = offset; urb->iso_frame_desc[i].length = mtu; } if (len && i < BTUSB_MAX_ISOC_FRAMES) { urb->iso_frame_desc[i].offset = offset; urb->iso_frame_desc[i].length = len; i++; } urb->number_of_packets = i; } static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) { struct btusb_data *data = hdev->driver_data; struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size; BT_DBG("%s", hdev->name); if (!data->isoc_rx_ep) return -ENODEV; urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags); if (!urb) return -ENOMEM; size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) * BTUSB_MAX_ISOC_FRAMES; buf = kmalloc(size, mem_flags); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); urb->dev = data->udev; urb->pipe = pipe; urb->context = hdev; urb->complete = btusb_isoc_complete; urb->interval = data->isoc_rx_ep->bInterval; urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; 
urb->transfer_buffer = buf; urb->transfer_buffer_length = size; __fill_isoc_descriptor(urb, size, le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); usb_anchor_urb(urb, &data->isoc_anchor); err = usb_submit_urb(urb, mem_flags); if (err < 0) { BT_ERR("%s urb %p submission failed (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static void btusb_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; struct hci_dev *hdev = (struct hci_dev *) skb->dev; struct btusb_data *data = hdev->driver_data; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; if (!urb->status) hdev->stat.byte_tx += urb->transfer_buffer_length; else hdev->stat.err_tx++; done: spin_lock(&data->txlock); data->tx_in_flight--; spin_unlock(&data->txlock); kfree(urb->setup_packet); kfree_skb(skb); } static void btusb_isoc_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; struct hci_dev *hdev = (struct hci_dev *) skb->dev; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; if (!urb->status) hdev->stat.byte_tx += urb->transfer_buffer_length; else hdev->stat.err_tx++; done: kfree(urb->setup_packet); kfree_skb(skb); } static int btusb_open(struct hci_dev *hdev) { struct btusb_data *data = hdev->driver_data; int err; BT_DBG("%s", hdev->name); err = usb_autopm_get_interface(data->intf); if (err < 0) return err; data->intf->needs_remote_wakeup = 1; if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) goto done; if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) goto done; err = btusb_submit_intr_urb(hdev, GFP_KERNEL); if (err < 0) goto failed; err = btusb_submit_bulk_urb(hdev, GFP_KERNEL); if (err < 0) { usb_kill_anchored_urbs(&data->intr_anchor); goto failed; } set_bit(BTUSB_BULK_RUNNING, &data->flags); btusb_submit_bulk_urb(hdev, GFP_KERNEL); done: 
usb_autopm_put_interface(data->intf); return 0; failed: clear_bit(BTUSB_INTR_RUNNING, &data->flags); clear_bit(HCI_RUNNING, &hdev->flags); usb_autopm_put_interface(data->intf); return err; } static void btusb_stop_traffic(struct btusb_data *data) { usb_kill_anchored_urbs(&data->intr_anchor); usb_kill_anchored_urbs(&data->bulk_anchor); usb_kill_anchored_urbs(&data->isoc_anchor); } static int btusb_close(struct hci_dev *hdev) { struct btusb_data *data = hdev->driver_data; int err; BT_DBG("%s", hdev->name); if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) return 0; cancel_work_sync(&data->work); cancel_work_sync(&data->waker); clear_bit(BTUSB_ISOC_RUNNING, &data->flags); clear_bit(BTUSB_BULK_RUNNING, &data->flags); clear_bit(BTUSB_INTR_RUNNING, &data->flags); btusb_stop_traffic(data); err = usb_autopm_get_interface(data->intf); if (err < 0) goto failed; data->intf->needs_remote_wakeup = 0; usb_autopm_put_interface(data->intf); failed: usb_scuttle_anchored_urbs(&data->deferred); return 0; } static int btusb_flush(struct hci_dev *hdev) { struct btusb_data *data = hdev->driver_data; BT_DBG("%s", hdev->name); usb_kill_anchored_urbs(&data->tx_anchor); return 0; } static int btusb_send_frame(struct sk_buff *skb) { struct hci_dev *hdev = (struct hci_dev *) skb->dev; struct btusb_data *data = hdev->driver_data; struct usb_ctrlrequest *dr; struct urb *urb; unsigned int pipe; int err; BT_DBG("%s", hdev->name); if (!test_bit(HCI_RUNNING, &hdev->flags)) return -EBUSY; switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; dr = kmalloc(sizeof(*dr), GFP_ATOMIC); if (!dr) { usb_free_urb(urb); return -ENOMEM; } dr->bRequestType = data->cmdreq_type; dr->bRequest = 0; dr->wIndex = 0; dr->wValue = 0; dr->wLength = __cpu_to_le16(skb->len); pipe = usb_sndctrlpipe(data->udev, 0x00); usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, skb->data, skb->len, btusb_tx_complete, skb); hdev->stat.cmd_tx++; break; case 
HCI_ACLDATA_PKT: if (!data->bulk_tx_ep) return -ENODEV; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_tx_complete, skb); hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1) return -ENODEV; urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC); if (!urb) return -ENOMEM; pipe = usb_sndisocpipe(data->udev, data->isoc_tx_ep->bEndpointAddress); usb_fill_int_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_isoc_tx_complete, skb, data->isoc_tx_ep->bInterval); urb->transfer_flags = URB_ISO_ASAP; __fill_isoc_descriptor(urb, skb->len, le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); hdev->stat.sco_tx++; goto skip_waking; default: return -EILSEQ; } err = inc_tx(data); if (err) { usb_anchor_urb(urb, &data->deferred); schedule_work(&data->waker); err = 0; goto done; } skip_waking: usb_anchor_urb(urb, &data->tx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { BT_ERR("%s urb %p submission failed", hdev->name, urb); kfree(urb->setup_packet); usb_unanchor_urb(urb); } else { usb_mark_last_busy(data->udev); } usb_free_urb(urb); done: return err; } static void btusb_destruct(struct hci_dev *hdev) { struct btusb_data *data = hdev->driver_data; BT_DBG("%s", hdev->name); kfree(data); } static void btusb_notify(struct hci_dev *hdev, unsigned int evt) { struct btusb_data *data = hdev->driver_data; BT_DBG("%s evt %d", hdev->name, evt); if (hdev->conn_hash.sco_num != data->sco_num) { data->sco_num = hdev->conn_hash.sco_num; schedule_work(&data->work); } } static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting) { struct btusb_data *data = hdev->driver_data; struct usb_interface *intf = data->isoc; struct usb_endpoint_descriptor *ep_desc; int i, err; if (!data->isoc) return -ENODEV; err = usb_set_interface(data->udev, 1, altsetting); if (err < 0) { 
BT_ERR("%s setting interface failed (%d)", hdev->name, -err); return err; } data->isoc_altsetting = altsetting; data->isoc_tx_ep = NULL; data->isoc_rx_ep = NULL; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { ep_desc = &intf->cur_altsetting->endpoint[i].desc; if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { data->isoc_tx_ep = ep_desc; continue; } if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { data->isoc_rx_ep = ep_desc; continue; } } if (!data->isoc_tx_ep || !data->isoc_rx_ep) { BT_ERR("%s invalid SCO descriptors", hdev->name); return -ENODEV; } return 0; } static void btusb_work(struct work_struct *work) { struct btusb_data *data = container_of(work, struct btusb_data, work); struct hci_dev *hdev = data->hdev; int err; if (hdev->conn_hash.sco_num > 0) { if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf); if (err < 0) { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); return; } set_bit(BTUSB_DID_ISO_RESUME, &data->flags); } if (data->isoc_altsetting != 2) { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); if (__set_isoc_interface(hdev, 2) < 0) return; } if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0) clear_bit(BTUSB_ISOC_RUNNING, &data->flags); else btusb_submit_isoc_urb(hdev, GFP_KERNEL); } } else { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); __set_isoc_interface(hdev, 0); if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) usb_autopm_put_interface(data->isoc ? 
data->isoc : data->intf); } } static void btusb_waker(struct work_struct *work) { struct btusb_data *data = container_of(work, struct btusb_data, waker); int err; err = usb_autopm_get_interface(data->intf); if (err < 0) return; usb_autopm_put_interface(data->intf); } static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_endpoint_descriptor *ep_desc; struct btusb_data *data; struct hci_dev *hdev; int i, err; BT_DBG("intf %p id %p", intf, id); /* interface numbers are hardcoded in the spec */ if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; if (!id->driver_info) { const struct usb_device_id *match; match = usb_match_id(intf, blacklist_table); if (match) id = match; } if (id->driver_info == BTUSB_IGNORE) return -ENODEV; if (ignore_dga && id->driver_info & BTUSB_DIGIANSWER) return -ENODEV; if (ignore_csr && id->driver_info & BTUSB_CSR) return -ENODEV; if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER) return -ENODEV; if (id->driver_info & BTUSB_ATH3012) { struct usb_device *udev = interface_to_usbdev(intf); /* Old firmware would otherwise let ath3k driver load * patch and sysconfig files */ if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001) return -ENODEV; } data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { ep_desc = &intf->cur_altsetting->endpoint[i].desc; if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) { data->intr_ep = ep_desc; continue; } if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { data->bulk_tx_ep = ep_desc; continue; } if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { data->bulk_rx_ep = ep_desc; continue; } } if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) { kfree(data); return -ENODEV; } data->cmdreq_type = USB_TYPE_CLASS; data->udev = interface_to_usbdev(intf); data->intf = intf; spin_lock_init(&data->lock); INIT_WORK(&data->work, btusb_work); 
INIT_WORK(&data->waker, btusb_waker); spin_lock_init(&data->txlock); init_usb_anchor(&data->tx_anchor); init_usb_anchor(&data->intr_anchor); init_usb_anchor(&data->bulk_anchor); init_usb_anchor(&data->isoc_anchor); init_usb_anchor(&data->deferred); hdev = hci_alloc_dev(); if (!hdev) { kfree(data); return -ENOMEM; } hdev->bus = HCI_USB; hdev->driver_data = data; data->hdev = hdev; SET_HCIDEV_DEV(hdev, &intf->dev); hdev->open = btusb_open; hdev->close = btusb_close; hdev->flush = btusb_flush; hdev->send = btusb_send_frame; hdev->destruct = btusb_destruct; hdev->notify = btusb_notify; hdev->owner = THIS_MODULE; /* Interface numbers are hardcoded in the specification */ data->isoc = usb_ifnum_to_if(data->udev, 1); if (!reset) set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { if (!disable_scofix) set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); } if (id->driver_info & BTUSB_BROKEN_ISOC) data->isoc = NULL; if (id->driver_info & BTUSB_DIGIANSWER) { data->cmdreq_type = USB_TYPE_VENDOR; set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); } if (id->driver_info & BTUSB_CSR) { struct usb_device *udev = data->udev; /* Old firmware would otherwise execute USB reset */ if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); } if (id->driver_info & BTUSB_SNIFFER) { struct usb_device *udev = data->udev; /* New sniffer firmware has crippled HCI interface */ if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); data->isoc = NULL; } if (id->driver_info & BTUSB_BCM92035) { unsigned char cmd[] = { 0x3b, 0xfc, 0x01, 0x00 }; struct sk_buff *skb; skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); if (skb) { memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd)); skb_queue_tail(&hdev->driver_init, skb); } } if (data->isoc) { err = usb_driver_claim_interface(&btusb_driver, data->isoc, data); if (err < 0) { hci_free_dev(hdev); kfree(data); return err; } } err = 
hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); kfree(data); return err; } usb_set_intfdata(intf, data); return 0; } static void btusb_disconnect(struct usb_interface *intf) { struct btusb_data *data = usb_get_intfdata(intf); struct hci_dev *hdev; BT_DBG("intf %p", intf); if (!data) return; hdev = data->hdev; __hci_dev_hold(hdev); usb_set_intfdata(data->intf, NULL); if (data->isoc) usb_set_intfdata(data->isoc, NULL); hci_unregister_dev(hdev); if (intf == data->isoc) usb_driver_release_interface(&btusb_driver, data->intf); else if (data->isoc) usb_driver_release_interface(&btusb_driver, data->isoc); __hci_dev_put(hdev); hci_free_dev(hdev); } #ifdef CONFIG_PM static int btusb_suspend(struct usb_interface *intf, pm_message_t message) { struct btusb_data *data = usb_get_intfdata(intf); BT_DBG("intf %p", intf); if (data->suspend_count++) return 0; spin_lock_irq(&data->txlock); if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) { set_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); } else { spin_unlock_irq(&data->txlock); data->suspend_count--; return -EBUSY; } cancel_work_sync(&data->work); btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); return 0; } static void play_deferred(struct btusb_data *data) { struct urb *urb; int err; while ((urb = usb_get_from_anchor(&data->deferred))) { err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) break; data->tx_in_flight++; } usb_scuttle_anchored_urbs(&data->deferred); } static int btusb_resume(struct usb_interface *intf) { struct btusb_data *data = usb_get_intfdata(intf); struct hci_dev *hdev = data->hdev; int err = 0; BT_DBG("intf %p", intf); if (--data->suspend_count) return 0; if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) { err = btusb_submit_intr_urb(hdev, GFP_NOIO); if (err < 0) { clear_bit(BTUSB_INTR_RUNNING, &data->flags); goto failed; } } if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) { err = 
btusb_submit_bulk_urb(hdev, GFP_NOIO); if (err < 0) { clear_bit(BTUSB_BULK_RUNNING, &data->flags); goto failed; } btusb_submit_bulk_urb(hdev, GFP_NOIO); } if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) { if (btusb_submit_isoc_urb(hdev, GFP_NOIO) < 0) clear_bit(BTUSB_ISOC_RUNNING, &data->flags); else btusb_submit_isoc_urb(hdev, GFP_NOIO); } spin_lock_irq(&data->txlock); play_deferred(data); clear_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); schedule_work(&data->work); return 0; failed: usb_scuttle_anchored_urbs(&data->deferred); done: spin_lock_irq(&data->txlock); clear_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); return err; } #endif static struct usb_driver btusb_driver = { .name = "btusb", .probe = btusb_probe, .disconnect = btusb_disconnect, #ifdef CONFIG_PM .suspend = btusb_suspend, .resume = btusb_resume, #endif .id_table = btusb_table, .supports_autosuspend = 1, }; static int __init btusb_init(void) { BT_INFO("Generic Bluetooth USB driver ver %s", VERSION); return usb_register(&btusb_driver); } static void __exit btusb_exit(void) { usb_deregister(&btusb_driver); } module_init(btusb_init); module_exit(btusb_exit); module_param(ignore_dga, bool, 0644); MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001"); module_param(ignore_csr, bool, 0644); MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001"); module_param(ignore_sniffer, bool, 0644); MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002"); module_param(disable_scofix, bool, 0644); MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size"); module_param(force_scofix, bool, 0644); MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size"); module_param(reset, bool, 0644); MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Generic Bluetooth USB driver ver " VERSION); MODULE_VERSION(VERSION); 
MODULE_LICENSE("GPL");
gpl-2.0
sktjdgns1189/android_kernel_samsung_kccat6
drivers/atm/nicstar.c
2209
76907
/* * nicstar.c * * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. * * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME. * It was taken from the frle-0.22 device driver. * As the file doesn't have a copyright notice, in the file * nicstarmac.copyright I put the copyright notice from the * frle-0.22 device driver. * Some code is based on the nicstar driver by M. Welsh. * * Author: Rui Prior (rprior@inescn.pt) * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999 * * * (C) INESC 1999 */ /* * IMPORTANT INFORMATION * * There are currently three types of spinlocks: * * 1 - Per card interrupt spinlock (to protect structures and such) * 2 - Per SCQ scq spinlock * 3 - Per card resource spinlock (to access registers, etc.) * * These must NEVER be grabbed in reverse order. * */ /* Header files */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/atmdev.h> #include <linux/atm.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/types.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/idr.h> #include <asm/io.h> #include <asm/uaccess.h> #include <linux/atomic.h> #include "nicstar.h" #ifdef CONFIG_ATM_NICSTAR_USE_SUNI #include "suni.h" #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 #include "idt77105.h" #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ /* Additional code */ #include "nicstarmac.c" /* Configurable parameters */ #undef PHY_LOOPBACK #undef TX_DEBUG #undef RX_DEBUG #undef GENERAL_DEBUG #undef EXTRA_DEBUG #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know you're going to use only raw ATM */ /* Do not touch these */ #ifdef TX_DEBUG #define TXPRINTK(args...) printk(args) #else #define TXPRINTK(args...) 
#endif /* TX_DEBUG */ #ifdef RX_DEBUG #define RXPRINTK(args...) printk(args) #else #define RXPRINTK(args...) #endif /* RX_DEBUG */ #ifdef GENERAL_DEBUG #define PRINTK(args...) printk(args) #else #define PRINTK(args...) #endif /* GENERAL_DEBUG */ #ifdef EXTRA_DEBUG #define XPRINTK(args...) printk(args) #else #define XPRINTK(args...) #endif /* EXTRA_DEBUG */ /* Macros */ #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) #define NS_DELAY mdelay(1) #define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b))) #ifndef ATM_SKB #define ATM_SKB(s) (&(s)->atm) #endif #define scq_virt_to_bus(scq, p) \ (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org)) /* Function declarations */ static u32 ns_read_sram(ns_dev * card, u32 sram_address); static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, int count); static int ns_init_card(int i, struct pci_dev *pcidev); static void ns_init_card_error(ns_dev * card, int error); static scq_info *get_scq(ns_dev *card, int size, u32 scd); static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc); static void push_rxbufs(ns_dev *, struct sk_buff *); static irqreturn_t ns_irq_handler(int irq, void *dev_id); static int ns_open(struct atm_vcc *vcc); static void ns_close(struct atm_vcc *vcc); static void fill_tst(ns_dev * card, int n, vc_map * vc); static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, struct sk_buff *skb); static void process_tsq(ns_dev * card); static void drain_scq(ns_dev * card, scq_info * scq, int pos); static void process_rsq(ns_dev * card); static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe); #ifdef NS_USE_DESTRUCTORS static void ns_sb_destructor(struct sk_buff *sb); static void ns_lb_destructor(struct sk_buff *lb); static void ns_hb_destructor(struct sk_buff *hb); #endif /* NS_USE_DESTRUCTORS */ static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb); 
static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
#ifdef EXTRA_DEBUG
static void which_list(ns_dev * card, struct sk_buff *skb);
#endif
static void ns_poll(unsigned long arg);
static int ns_parse_mac(char *mac, unsigned char *esi);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
		       unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);

/* Global variables */

static struct ns_dev *cards[NS_MAX_CARDS];
static unsigned num_cards;

/* Operations handed to the ATM core for every registered card. */
static struct atmdev_ops atm_ops = {
	.open = ns_open,
	.close = ns_close,
	.ioctl = ns_ioctl,
	.send = ns_send,
	.phy_put = ns_phy_put,
	.phy_get = ns_phy_get,
	.proc_read = ns_proc_read,
	.owner = THIS_MODULE,
};

/* Single polling timer shared by all cards; armed in nicstar_init(). */
static struct timer_list ns_timer;
/* Optional per-card MAC address overrides, supplied as module parameters. */
static char *mac[NS_MAX_CARDS];

module_param_array(mac, charp, NULL, 0);
MODULE_LICENSE("GPL");

/* Functions */

/*
 * PCI probe callback: claim the next free slot in cards[] and bring the
 * card up via ns_init_card().  Returns 0 on success, -ENODEV otherwise.
 */
static int nicstar_init_one(struct pci_dev *pcidev,
			    const struct pci_device_id *ent)
{
	static int index = -1;
	unsigned int error;

	index++;
	cards[index] = NULL;

	error = ns_init_card(index, pcidev);
	if (error) {
		cards[index--] = NULL;	/* don't increment index */
		goto err_out;
	}

	return 0;
err_out:
	return -ENODEV;
}

/*
 * PCI remove callback: stop the PHY, quiesce the SAR (zero CFG),
 * deregister the ATM device, then free every pool buffer, queue and
 * resource owned by the card.
 */
static void nicstar_remove_one(struct pci_dev *pcidev)
{
	int i, j;
	ns_dev *card = pci_get_drvdata(pcidev);
	struct sk_buff *hb;
	struct sk_buff *iovb;
	struct sk_buff *lb;
	struct sk_buff *sb;

	i = card->index;

	if (cards[i] == NULL)
		return;

	if (card->atmdev->phy && card->atmdev->phy->stop)
		card->atmdev->phy->stop(card->atmdev);

	/* Stop everything */
	writel(0x00000000, card->membase + CFG);

	/* De-register device */
	atm_dev_deregister(card->atmdev);

	/* Disable PCI device */
	pci_disable_device(pcidev);

	/* Free up resources */
	j = 0;
	PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
	while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
		dev_kfree_skb_any(hb);
		j++;
	}
	PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
	j = 0;
	PRINTK("nicstar%d: freeing %d iovec buffers.\n", i,
	       card->iovpool.count);
	while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
		dev_kfree_skb_any(iovb);
		j++;
	}
	PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
	while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
		dev_kfree_skb_any(lb);
	while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
		dev_kfree_skb_any(sb);
	free_scq(card, card->scq0, NULL);
	for (j = 0; j < NS_FRSCD_NUM; j++) {
		if (card->scd2vc[j] != NULL)
			free_scq(card, card->scd2vc[j]->scq,
				 card->scd2vc[j]->tx_vcc);
	}
	idr_destroy(&card->idr);
	/* TSQ/RSQ were allocated with pci_alloc_consistent() */
	pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
			    card->rsq.org, card->rsq.dma);
	pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
			    card->tsq.org, card->tsq.dma);
	free_irq(card->pcidev->irq, card);
	iounmap(card->membase);
	kfree(card);
}

static struct pci_device_id nicstar_pci_tbl[] = {
	{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
	{0,}			/* terminate list */
};

MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);

static struct pci_driver nicstar_driver = {
	.name = "nicstar",
	.id_table = nicstar_pci_tbl,
	.probe = nicstar_init_one,
	.remove = nicstar_remove_one,
};

/*
 * Module init: register the PCI driver and, if that succeeded, arm the
 * global ns_poll timer.
 */
static int __init nicstar_init(void)
{
	unsigned error = 0;	/* Initialized to remove compile warning */

	XPRINTK("nicstar: nicstar_init() called.\n");

	error = pci_register_driver(&nicstar_driver);

	TXPRINTK("nicstar: TX debug enabled.\n");
	RXPRINTK("nicstar: RX debug enabled.\n");
	PRINTK("nicstar: General debug enabled.\n");
#ifdef PHY_LOOPBACK
	printk("nicstar: using PHY loopback.\n");
#endif /* PHY_LOOPBACK */
	XPRINTK("nicstar: nicstar_init() returned.\n");

	if (!error) {
		init_timer(&ns_timer);
		ns_timer.expires = jiffies + NS_POLL_PERIOD;
		ns_timer.data =
0UL; ns_timer.function = ns_poll; add_timer(&ns_timer); } return error; } static void __exit nicstar_cleanup(void) { XPRINTK("nicstar: nicstar_cleanup() called.\n"); del_timer(&ns_timer); pci_unregister_driver(&nicstar_driver); XPRINTK("nicstar: nicstar_cleanup() returned.\n"); } static u32 ns_read_sram(ns_dev * card, u32 sram_address) { unsigned long flags; u32 data; sram_address <<= 2; sram_address &= 0x0007FFFC; /* address must be dword aligned */ sram_address |= 0x50000000; /* SRAM read command */ spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(sram_address, card->membase + CMD); while (CMD_BUSY(card)) ; data = readl(card->membase + DR0); spin_unlock_irqrestore(&card->res_lock, flags); return data; } static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value, int count) { unsigned long flags; int i, c; count--; /* count range now is 0..3 instead of 1..4 */ c = count; c <<= 2; /* to use increments of 4 */ spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; for (i = 0; i <= c; i += 4) writel(*(value++), card->membase + i); /* Note: DR# registers are the first 4 dwords in nicstar's memspace, so card->membase + DR0 == card->membase */ sram_address <<= 2; sram_address &= 0x0007FFFC; sram_address |= (0x40000000 | count); writel(sram_address, card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); } static int ns_init_card(int i, struct pci_dev *pcidev) { int j; struct ns_dev *card = NULL; unsigned char pci_latency; unsigned error; u32 data; u32 u32d[4]; u32 ns_cfg_rctsize; int bcount; unsigned long membase; error = 0; if (pci_enable_device(pcidev)) { printk("nicstar%d: can't enable PCI device\n", i); error = 2; ns_init_card_error(card, error); return error; } if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) || (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) { printk(KERN_WARNING "nicstar%d: No suitable DMA available.\n", i); error = 2; ns_init_card_error(card, error); return 
error;
	}

	if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) {
		printk
		    ("nicstar%d: can't allocate memory for device structure.\n",
		     i);
		error = 2;
		ns_init_card_error(card, error);
		return error;
	}
	cards[i] = card;
	spin_lock_init(&card->int_lock);
	spin_lock_init(&card->res_lock);
	pci_set_drvdata(pcidev, card);
	card->index = i;
	card->atmdev = NULL;
	card->pcidev = pcidev;
	membase = pci_resource_start(pcidev, 1);
	card->membase = ioremap(membase, NS_IOREMAP_SIZE);
	if (!card->membase) {
		printk("nicstar%d: can't ioremap() membase.\n", i);
		error = 3;
		ns_init_card_error(card, error);
		return error;
	}
	PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase);

	pci_set_master(pcidev);

	if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) {
		printk("nicstar%d: can't read PCI latency timer.\n", i);
		error = 6;
		ns_init_card_error(card, error);
		return error;
	}
#ifdef NS_PCI_LATENCY
	/* NOTE(review): this retry loop breaks on a *failed* config write
	   and reports an error only when all three writes succeed — the
	   condition looks inverted.  Dead code unless NS_PCI_LATENCY is
	   defined; verify before enabling. */
	if (pci_latency < NS_PCI_LATENCY) {
		PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i,
		       NS_PCI_LATENCY);
		for (j = 1; j < 4; j++) {
			if (pci_write_config_byte
			    (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
				break;
		}
		if (j == 4) {
			printk
			    ("nicstar%d: can't set PCI latency timer to %d.\n",
			     i, NS_PCI_LATENCY);
			error = 7;
			ns_init_card_error(card, error);
			return error;
		}
	}
#endif /* NS_PCI_LATENCY */

	/* Clear timer overflow */
	data = readl(card->membase + STAT);
	if (data & NS_STAT_TMROF)
		writel(NS_STAT_TMROF, card->membase + STAT);

	/* Software reset */
	writel(NS_CFG_SWRST, card->membase + CFG);
	NS_DELAY;
	writel(0x00000000, card->membase + CFG);

	/* PHY reset */
	writel(0x00000008, card->membase + GP);
	NS_DELAY;
	writel(0x00000001, card->membase + GP);
	NS_DELAY;
	while (CMD_BUSY(card)) ;
	writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD);	/* Sync UTOPIA with SAR clock */
	NS_DELAY;

	/* Detect PHY type */
	while (CMD_BUSY(card)) ;
	writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
	while (CMD_BUSY(card)) ;
	data = readl(card->membase + DR0);
	switch (data) {
	case
0x00000009:
		/* Utility register 0x09 identifies the 25.6 Mbps PHY */
		printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
		card->max_pcr = ATM_25_PCR;
		while (CMD_BUSY(card)) ;
		writel(0x00000008, card->membase + DR0);
		writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
		/* Clear an eventual pending interrupt */
		writel(NS_STAT_SFBQF, card->membase + STAT);
#ifdef PHY_LOOPBACK
		while (CMD_BUSY(card)) ;
		writel(0x00000022, card->membase + DR0);
		writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
#endif /* PHY_LOOPBACK */
		break;
	case 0x00000030:
	case 0x00000031:
		printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
		card->max_pcr = ATM_OC3_PCR;
#ifdef PHY_LOOPBACK
		while (CMD_BUSY(card)) ;
		writel(0x00000002, card->membase + DR0);
		writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
#endif /* PHY_LOOPBACK */
		break;
	default:
		printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
		error = 8;
		ns_init_card_error(card, error);
		return error;
	}
	writel(0x00000000, card->membase + GP);

	/* Determine SRAM size: write test patterns at two addresses that
	   alias on a 32K part and check whether both read back intact. */
	data = 0x76543210;
	ns_write_sram(card, 0x1C003, &data, 1);
	data = 0x89ABCDEF;
	ns_write_sram(card, 0x14003, &data, 1);
	if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
	    ns_read_sram(card, 0x1C003) == 0x76543210)
		card->sram_size = 128;
	else
		card->sram_size = 32;
	PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);

	card->rct_size = NS_MAX_RCTSIZE;

#if (NS_MAX_RCTSIZE == 4096)
	if (card->sram_size == 128)
		printk
		    ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n",
		     i);
#elif (NS_MAX_RCTSIZE == 16384)
	if (card->sram_size == 32) {
		printk
		    ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n",
		     i);
		card->rct_size = 4096;
	}
#else
#error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
#endif

	card->vpibits = NS_VPIBITS;
	if (card->rct_size == 4096)
		card->vcibits = 12 - NS_VPIBITS;
	else			/* card->rct_size == 16384 */
		card->vcibits = 14 - NS_VPIBITS;

	/* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
	if (mac[i] == NULL)
		nicstar_init_eprom(card->membase);

	/* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
	writel(0x00000000, card->membase + VPM);

	/* Initialize TSQ (transmit status queue, DMA-coherent, aligned) */
	card->tsq.org = pci_alloc_consistent(card->pcidev,
					     NS_TSQSIZE + NS_TSQ_ALIGNMENT,
					     &card->tsq.dma);
	if (card->tsq.org == NULL) {
		printk("nicstar%d: can't allocate TSQ.\n", i);
		error = 10;
		ns_init_card_error(card, error);
		return error;
	}
	card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
	card->tsq.next = card->tsq.base;
	card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
	for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
		ns_tsi_init(card->tsq.base + j);
	writel(0x00000000, card->membase + TSQH);
	writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
	PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);

	/* Initialize RSQ (receive status queue, DMA-coherent, aligned) */
	card->rsq.org = pci_alloc_consistent(card->pcidev,
					     NS_RSQSIZE + NS_RSQ_ALIGNMENT,
					     &card->rsq.dma);
	if (card->rsq.org == NULL) {
		printk("nicstar%d: can't allocate RSQ.\n", i);
		error = 11;
		ns_init_card_error(card, error);
		return error;
	}
	card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT);
	card->rsq.next = card->rsq.base;
	card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
	for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
		ns_rsqe_init(card->rsq.base + j);
	writel(0x00000000, card->membase + RSQH);
	writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB);
	PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base);

	/* Initialize SCQ0, the only VBR SCQ used */
	card->scq1 = NULL;
	card->scq2 = NULL;
	card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0);
	if (card->scq0 ==
	    NULL) {
		printk("nicstar%d: can't get SCQ0.\n", i);
		error = 12;
		ns_init_card_error(card, error);
		return error;
	}
	/* Program the VBR SCD words in SRAM with SCQ0's bus address */
	u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
	u32d[1] = (u32) 0x00000000;
	u32d[2] = (u32) 0xffffffff;
	u32d[3] = (u32) 0x00000000;
	ns_write_sram(card, NS_VRSCD0, u32d, 4);
	ns_write_sram(card, NS_VRSCD1, u32d, 4);	/* These last two won't be used */
	ns_write_sram(card, NS_VRSCD2, u32d, 4);	/* but are initialized, just in case... */
	card->scq0->scd = NS_VRSCD0;
	PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base);

	/* Initialize TSTs: fill both tables and terminate each with an
	   END opcode pointing back at its own start. */
	card->tst_addr = NS_TST0;
	card->tst_free_entries = NS_TST_NUM_ENTRIES;
	data = NS_TST_OPCODE_VARIABLE;
	for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
		ns_write_sram(card, NS_TST0 + j, &data, 1);
	data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
	ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
	for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
		ns_write_sram(card, NS_TST1 + j, &data, 1);
	data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
	ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
	for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
		card->tste2vc[j] = NULL;
	writel(NS_TST0 << 2, card->membase + TSTB);

	/* Initialize RCT. AAL type is set on opening the VC.
	 */
#ifdef RCQ_SUPPORT
	u32d[0] = NS_RCTE_RAWCELLINTEN;
#else
	u32d[0] = 0x00000000;
#endif /* RCQ_SUPPORT */
	u32d[1] = 0x00000000;
	u32d[2] = 0x00000000;
	u32d[3] = 0xFFFFFFFF;
	for (j = 0; j < card->rct_size; j++)
		ns_write_sram(card, j * 4, u32d, 4);

	memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));

	for (j = 0; j < NS_FRSCD_NUM; j++)
		card->scd2vc[j] = NULL;

	/* Initialize buffer levels */
	card->sbnr.min = MIN_SB;
	card->sbnr.init = NUM_SB;
	card->sbnr.max = MAX_SB;
	card->lbnr.min = MIN_LB;
	card->lbnr.init = NUM_LB;
	card->lbnr.max = MAX_LB;
	card->iovnr.min = MIN_IOVB;
	card->iovnr.init = NUM_IOVB;
	card->iovnr.max = MAX_IOVB;
	card->hbnr.min = MIN_HB;
	card->hbnr.init = NUM_HB;
	card->hbnr.max = MAX_HB;

	card->sm_handle = 0x00000000;
	card->sm_addr = 0x00000000;
	card->lg_handle = 0x00000000;
	card->lg_addr = 0x00000000;

	card->efbie = 1;	/* To prevent push_rxbufs from enabling the interrupt */

	idr_init(&card->idr);

	/* Pre-allocate some huge buffers */
	skb_queue_head_init(&card->hbpool.queue);
	card->hbpool.count = 0;
	for (j = 0; j < NUM_HB; j++) {
		struct sk_buff *hb;
		hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
		if (hb == NULL) {
			printk
			    ("nicstar%d: can't allocate %dth of %d huge buffers.\n",
			     i, j, NUM_HB);
			error = 13;
			ns_init_card_error(card, error);
			return error;
		}
		NS_PRV_BUFTYPE(hb) = BUF_NONE;
		skb_queue_tail(&card->hbpool.queue, hb);
		card->hbpool.count++;
	}

	/* Allocate large buffers and hand them to the card's free queue */
	skb_queue_head_init(&card->lbpool.queue);
	card->lbpool.count = 0;	/* Not used */
	for (j = 0; j < NUM_LB; j++) {
		struct sk_buff *lb;
		lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
		if (lb == NULL) {
			printk
			    ("nicstar%d: can't allocate %dth of %d large buffers.\n",
			     i, j, NUM_LB);
			error = 14;
			ns_init_card_error(card, error);
			return error;
		}
		NS_PRV_BUFTYPE(lb) = BUF_LG;
		skb_queue_tail(&card->lbpool.queue, lb);
		skb_reserve(lb, NS_SMBUFSIZE);
		push_rxbufs(card, lb);
		/* Due to the implementation of push_rxbufs() this is 1, not 0 */
		if (j == 1) {
			card->rcbuf = lb;
			card->rawcell = (struct ns_rcqe *)
			    lb->data;
			card->rawch = NS_PRV_DMA(lb);
		}
	}
	/* Test for strange behaviour which leads to crashes */
	if ((bcount =
	     ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) {
		printk
		    ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
		     i, j, bcount);
		error = 14;
		ns_init_card_error(card, error);
		return error;
	}

	/* Allocate small buffers */
	skb_queue_head_init(&card->sbpool.queue);
	card->sbpool.count = 0;	/* Not used */
	for (j = 0; j < NUM_SB; j++) {
		struct sk_buff *sb;
		sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
		if (sb == NULL) {
			printk
			    ("nicstar%d: can't allocate %dth of %d small buffers.\n",
			     i, j, NUM_SB);
			error = 15;
			ns_init_card_error(card, error);
			return error;
		}
		NS_PRV_BUFTYPE(sb) = BUF_SM;
		skb_queue_tail(&card->sbpool.queue, sb);
		skb_reserve(sb, NS_AAL0_HEADER);
		push_rxbufs(card, sb);
	}
	/* Test for strange behaviour which leads to crashes */
	if ((bcount =
	     ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) {
		printk
		    ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
		     i, j, bcount);
		error = 15;
		ns_init_card_error(card, error);
		return error;
	}

	/* Allocate iovec buffers (hold scatter lists while reassembling) */
	skb_queue_head_init(&card->iovpool.queue);
	card->iovpool.count = 0;
	for (j = 0; j < NUM_IOVB; j++) {
		struct sk_buff *iovb;
		iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
		if (iovb == NULL) {
			printk
			    ("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
			     i, j, NUM_IOVB);
			error = 16;
			ns_init_card_error(card, error);
			return error;
		}
		NS_PRV_BUFTYPE(iovb) = BUF_NONE;
		skb_queue_tail(&card->iovpool.queue, iovb);
		card->iovpool.count++;
	}

	/* Configure NICStAR */
	if (card->rct_size == 4096)
		ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
	else			/* (card->rct_size == 16384) */
		ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;

	card->efbie = 1;

	card->intcnt = 0;
	if (request_irq
	    (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
		printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
		error = 9;
		ns_init_card_error(card, error);
		return error;
	}

	/* Register device */
	card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
					-1, NULL);
	if (card->atmdev == NULL) {
		printk("nicstar%d: can't register device.\n", i);
		error = 17;
		ns_init_card_error(card, error);
		return error;
	}

	/* MAC: module parameter override first, then EPROM (two offsets) */
	if (ns_parse_mac(mac[i], card->atmdev->esi)) {
		nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
				   card->atmdev->esi, 6);
		if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
		    0) {
			nicstar_read_eprom(card->membase,
					   NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
					   card->atmdev->esi, 6);
		}
	}

	printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);

	card->atmdev->dev_data = card;
	card->atmdev->ci_range.vpi_bits = card->vpibits;
	card->atmdev->ci_range.vci_bits = card->vcibits;
	card->atmdev->link_rate = card->max_pcr;
	card->atmdev->phy = NULL;

#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
	if (card->max_pcr == ATM_OC3_PCR)
		suni_init(card->atmdev);
#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */

#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
	if
(card->max_pcr == ATM_25_PCR) idt77105_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ if (card->atmdev->phy && card->atmdev->phy->start) card->atmdev->phy->start(card->atmdev); writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ NS_CFG_PHYIE, card->membase + CFG); num_cards++; return error; } static void ns_init_card_error(ns_dev *card, int error) { if (error >= 17) { writel(0x00000000, card->membase + CFG); } if (error >= 16) { struct sk_buff *iovb; while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) dev_kfree_skb_any(iovb); } if (error >= 15) { struct sk_buff *sb; while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) dev_kfree_skb_any(sb); free_scq(card, card->scq0, NULL); } if (error >= 14) { struct sk_buff *lb; while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) dev_kfree_skb_any(lb); } if (error >= 13) { struct sk_buff *hb; while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) dev_kfree_skb_any(hb); } if (error >= 12) { kfree(card->rsq.org); } if (error >= 11) { kfree(card->tsq.org); } if (error >= 10) { free_irq(card->pcidev->irq, card); } if (error >= 4) { iounmap(card->membase); } if (error >= 3) { pci_disable_device(card->pcidev); kfree(card); } } static scq_info *get_scq(ns_dev *card, int size, u32 scd) { scq_info *scq; int i; if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) return NULL; scq = kmalloc(sizeof(scq_info), GFP_KERNEL); if (!scq) return NULL; scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma); if (!scq->org) { kfree(scq); return NULL; } scq->skb = kmalloc(sizeof(struct sk_buff *) * (size / NS_SCQE_SIZE), GFP_KERNEL); if (!scq->skb) { kfree(scq->org); kfree(scq); return NULL; } scq->num_entries = size / NS_SCQE_SIZE; scq->base = PTR_ALIGN(scq->org, 
			      size);
	scq->next = scq->base;
	scq->last = scq->base + (scq->num_entries - 1);
	scq->tail = scq->last;
	scq->scd = scd;
	scq->num_entries = size / NS_SCQE_SIZE;
	scq->tbd_count = 0;
	init_waitqueue_head(&scq->scqfull_waitq);
	scq->full = 0;
	spin_lock_init(&scq->lock);

	for (i = 0; i < scq->num_entries; i++)
		scq->skb[i] = NULL;

	return scq;
}

/* For variable rate SCQ vcc must be NULL */
/*
 * Release an SCQ: pop or free any skbs still referenced by its entries,
 * then free the skb table and the DMA-coherent ring.  For the shared
 * VBR queue the owning vcc is recovered per-skb from ATM_SKB().
 */
static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
{
	int i;

	if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
		for (i = 0; i < scq->num_entries; i++) {
			if (scq->skb[i] != NULL) {
				vcc = ATM_SKB(scq->skb[i])->vcc;
				if (vcc->pop != NULL)
					vcc->pop(vcc, scq->skb[i]);
				else
					dev_kfree_skb_any(scq->skb[i]);
			}
	} else {		/* vcc must be != NULL */

		if (vcc == NULL) {
			printk
			    ("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
			for (i = 0; i < scq->num_entries; i++)
				dev_kfree_skb_any(scq->skb[i]);
		} else
			for (i = 0; i < scq->num_entries; i++) {
				if (scq->skb[i] != NULL) {
					if (vcc->pop != NULL)
						vcc->pop(vcc, scq->skb[i]);
					else
						dev_kfree_skb_any(scq->skb[i]);
				}
			}
	}
	kfree(scq->skb);
	pci_free_consistent(card->pcidev,
			    2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
				 VBR_SCQSIZE : CBR_SCQSIZE),
			    scq->org, scq->dma);
	kfree(scq);
}

/* The handles passed must be pointers to the sk_buff
   containing the small or large buffer(s) cast to u32. */
/*
 * Hand a receive buffer back to the card's free-buffer queue.  Buffers
 * are submitted to the hardware in pairs: a single buffer is parked in
 * card->sm_addr/lg_addr until its partner arrives.
 */
static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
{
	struct sk_buff *handle1, *handle2;
	int id1, id2;
	u32 addr1, addr2;
	u32 stat;
	unsigned long flags;

	/* *BARF* */
	handle2 = NULL;
	addr2 = 0;
	handle1 = skb;
	addr1 = pci_map_single(card->pcidev,
			       skb->data,
			       (NS_PRV_BUFTYPE(skb) == BUF_SM ?
				NS_SMSKBSIZE : NS_LGSKBSIZE),
			       PCI_DMA_TODEVICE);
	NS_PRV_DMA(skb) = addr1;	/* save so we can unmap later */

#ifdef GENERAL_DEBUG
	if (!addr1)
		printk("nicstar%d: push_rxbufs called with addr1 = 0.\n",
		       card->index);
#endif /* GENERAL_DEBUG */

	stat = readl(card->membase + STAT);
	card->sbfqc = ns_stat_sfbqc_get(stat);
	card->lbfqc = ns_stat_lfbqc_get(stat);
	/* Pair this buffer with a previously parked one of the same type */
	if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
		if (!addr2) {
			if (card->sm_addr) {
				addr2 = card->sm_addr;
				handle2 = card->sm_handle;
				card->sm_addr = 0x00000000;
				card->sm_handle = 0x00000000;
			} else {	/* (!sm_addr) */

				card->sm_addr = addr1;
				card->sm_handle = handle1;
			}
		}
	} else {		/* buf_type == BUF_LG */

		if (!addr2) {
			if (card->lg_addr) {
				addr2 = card->lg_addr;
				handle2 = card->lg_handle;
				card->lg_addr = 0x00000000;
				card->lg_handle = 0x00000000;
			} else {	/* (!lg_addr) */

				card->lg_addr = addr1;
				card->lg_handle = handle1;
			}
		}
	}

	if (addr2) {
		/* Free queue already at its cap: drop both buffers */
		if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
			if (card->sbfqc >= card->sbnr.max) {
				skb_unlink(handle1, &card->sbpool.queue);
				dev_kfree_skb_any(handle1);
				skb_unlink(handle2, &card->sbpool.queue);
				dev_kfree_skb_any(handle2);
				return;
			} else
				card->sbfqc += 2;
		} else {	/* (buf_type == BUF_LG) */

			if (card->lbfqc >= card->lbnr.max) {
				skb_unlink(handle1, &card->lbpool.queue);
				dev_kfree_skb_any(handle1);
				skb_unlink(handle2, &card->lbpool.queue);
				dev_kfree_skb_any(handle2);
				return;
			} else
				card->lbfqc += 2;
		}

		id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
		if (id1 < 0)
			goto out;

		/* NOTE(review): if this second idr_alloc() fails, id1 is
		   never idr_remove()d and the DMA mappings stay live —
		   looks like a slow leak on the error path; verify. */
		id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
		if (id2 < 0)
			goto out;

		spin_lock_irqsave(&card->res_lock, flags);
		while (CMD_BUSY(card)) ;
		writel(addr2, card->membase + DR3);
		writel(id2, card->membase + DR2);
		writel(addr1, card->membase + DR1);
		writel(id1, card->membase + DR0);
		writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
		       card->membase + CMD);
		spin_unlock_irqrestore(&card->res_lock, flags);

		XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n",
			card->index,
			(NS_PRV_BUFTYPE(skb) == BUF_SM ?
			 "small" : "large"), addr1, addr2);
	}

	/* Re-enable the empty-free-buffer-queue interrupt once both pools
	   are back above their minimum levels */
	if (!card->efbie && card->sbfqc >= card->sbnr.min &&
	    card->lbfqc >= card->lbnr.min) {
		card->efbie = 1;
		writel((readl(card->membase + CFG) | NS_CFG_EFBIE),
		       card->membase + CFG);
	}

out:
	return;
}

/*
 * Interrupt handler: read STAT once, then service and acknowledge each
 * asserted condition in turn.  Runs under card->int_lock.
 */
static irqreturn_t ns_irq_handler(int irq, void *dev_id)
{
	u32 stat_r;
	ns_dev *card;
	struct atm_dev *dev;
	unsigned long flags;

	card = (ns_dev *) dev_id;
	dev = card->atmdev;
	card->intcnt++;

	PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);

	spin_lock_irqsave(&card->int_lock, flags);

	stat_r = readl(card->membase + STAT);

	/* Transmit Status Indicator has been written to T. S. Queue */
	if (stat_r & NS_STAT_TSIF) {
		TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
		process_tsq(card);
		writel(NS_STAT_TSIF, card->membase + STAT);
	}

	/* Incomplete CS-PDU has been transmitted */
	if (stat_r & NS_STAT_TXICP) {
		writel(NS_STAT_TXICP, card->membase + STAT);
		TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
			 card->index);
	}

	/* Transmit Status Queue 7/8 full */
	if (stat_r & NS_STAT_TSQF) {
		writel(NS_STAT_TSQF, card->membase + STAT);
		PRINTK("nicstar%d: TSQ full.\n", card->index);
		process_tsq(card);
	}

	/* Timer overflow */
	if (stat_r & NS_STAT_TMROF) {
		writel(NS_STAT_TMROF, card->membase + STAT);
		PRINTK("nicstar%d: Timer overflow.\n", card->index);
	}

	/* PHY device interrupt signal active */
	if (stat_r & NS_STAT_PHYI) {
		writel(NS_STAT_PHYI, card->membase + STAT);
		PRINTK("nicstar%d: PHY interrupt.\n", card->index);
		if (dev->phy && dev->phy->interrupt) {
			dev->phy->interrupt(dev);
		}
	}

	/* Small Buffer Queue is full */
	if (stat_r & NS_STAT_SFBQF) {
		writel(NS_STAT_SFBQF, card->membase + STAT);
		printk("nicstar%d: Small free buffer queue is full.\n",
		       card->index);
	}

	/* Large Buffer Queue is full */
	if (stat_r & NS_STAT_LFBQF) {
		writel(NS_STAT_LFBQF, card->membase + STAT);
		printk("nicstar%d: Large free buffer queue is full.\n",
		       card->index);
	}

	/* Receive Status Queue is full */
	if (stat_r & NS_STAT_RSQF) {
		writel(NS_STAT_RSQF, card->membase +
		       STAT);
		printk("nicstar%d: RSQ full.\n", card->index);
		process_rsq(card);
	}

	/* Complete CS-PDU received */
	if (stat_r & NS_STAT_EOPDU) {
		RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
		process_rsq(card);
		writel(NS_STAT_EOPDU, card->membase + STAT);
	}

	/* Raw cell received */
	if (stat_r & NS_STAT_RAWCF) {
		writel(NS_STAT_RAWCF, card->membase + STAT);
#ifndef RCQ_SUPPORT
		printk("nicstar%d: Raw cell received and no support yet...\n",
		       card->index);
#endif /* RCQ_SUPPORT */
		/* NOTE: the following procedure may keep a raw cell pending
		   until the next interrupt. As this preliminary support is only
		   meant to avoid buffer leakage, this is not an issue. */
		while (readl(card->membase + RAWCT) != card->rawch) {
			if (ns_rcqe_islast(card->rawcell)) {
				/* Chain to the next raw-cell buffer and
				   recycle the exhausted one */
				struct sk_buff *oldbuf;
				oldbuf = card->rcbuf;
				card->rcbuf = idr_find(&card->idr,
						       ns_rcqe_nextbufhandle
						       (card->rawcell));
				card->rawch = NS_PRV_DMA(card->rcbuf);
				card->rawcell = (struct ns_rcqe *)
						card->rcbuf->data;
				recycle_rx_buf(card, oldbuf);
			} else {
				card->rawch += NS_RCQE_SIZE;
				card->rawcell++;
			}
		}
	}

	/* Small buffer queue is empty: try to refill to the minimum level;
	   if allocation fails, mask EFBIE until push_rxbufs() recovers */
	if (stat_r & NS_STAT_SFBQE) {
		int i;
		struct sk_buff *sb;

		writel(NS_STAT_SFBQE, card->membase + STAT);
		printk("nicstar%d: Small free buffer queue empty.\n",
		       card->index);
		for (i = 0; i < card->sbnr.min; i++) {
			sb = dev_alloc_skb(NS_SMSKBSIZE);
			if (sb == NULL) {
				writel(readl(card->membase + CFG) &
				       ~NS_CFG_EFBIE, card->membase + CFG);
				card->efbie = 0;
				break;
			}
			NS_PRV_BUFTYPE(sb) = BUF_SM;
			skb_queue_tail(&card->sbpool.queue, sb);
			skb_reserve(sb, NS_AAL0_HEADER);
			push_rxbufs(card, sb);
		}
		card->sbfqc = i;
		process_rsq(card);
	}

	/* Large buffer queue empty */
	if (stat_r & NS_STAT_LFBQE) {
		int i;
		struct sk_buff *lb;

		writel(NS_STAT_LFBQE, card->membase + STAT);
		printk("nicstar%d: Large free buffer queue empty.\n",
		       card->index);
		for (i = 0; i < card->lbnr.min; i++) {
			lb = dev_alloc_skb(NS_LGSKBSIZE);
			if (lb == NULL) {
				writel(readl(card->membase + CFG) &
				       ~NS_CFG_EFBIE, card->membase + CFG);
				card->efbie =
				    0;
				break;
			}
			NS_PRV_BUFTYPE(lb) = BUF_LG;
			skb_queue_tail(&card->lbpool.queue, lb);
			skb_reserve(lb, NS_SMBUFSIZE);
			push_rxbufs(card, lb);
		}
		card->lbfqc = i;
		process_rsq(card);
	}

	/* Receive Status Queue is 7/8 full */
	if (stat_r & NS_STAT_RSQAF) {
		writel(NS_STAT_RSQAF, card->membase + STAT);
		RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
		process_rsq(card);
	}

	spin_unlock_irqrestore(&card->int_lock, flags);
	PRINTK("nicstar%d: end of interrupt service\n", card->index);
	return IRQ_HANDLED;
}

/*
 * ATM device open callback: validate the AAL, map the vpi/vci to its
 * vc_map slot, reject vci reuse, and (further down) set up the TX SCQ
 * and open the RX connection in the card's RCT.
 */
static int ns_open(struct atm_vcc *vcc)
{
	ns_dev *card;
	vc_map *vc;
	unsigned long tmpl, modl;
	int tcr, tcra;		/* target cell rate, and absolute value */
	int n = 0;		/* Number of entries in the TST. Initialized to remove
				   the compiler warning. */
	u32 u32d[4];
	int frscdi = 0;		/* Index of the SCD. Initialized to remove the compiler
				   warning. How I wish compilers were clever enough to
				   tell which variables can truly be used
				   uninitialized... */
	int inuse;		/* tx or rx vc already in use by another vcc */
	short vpi = vcc->vpi;
	int vci = vcc->vci;

	card = (ns_dev *) vcc->dev->dev_data;
	PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
	       vci);
	if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
		PRINTK("nicstar%d: unsupported AAL.\n", card->index);
		return -EINVAL;
	}

	vc = &(card->vcmap[vpi << card->vcibits | vci]);
	vcc->dev_data = vc;

	inuse = 0;
	if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
		inuse = 1;
	if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
		inuse += 2;
	if (inuse) {
		printk("nicstar%d: %s vci already in use.\n", card->index,
		       inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
		return -EINVAL;
	}

	set_bit(ATM_VF_ADDR, &vcc->flags);

	/* NOTE: You are not allowed to modify an open connection's QOS. To change
	   that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
	   needed to do that.
	 */
	if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
		scq_info *scq;

		set_bit(ATM_VF_PARTIAL, &vcc->flags);
		if (vcc->qos.txtp.traffic_class == ATM_CBR) {
			/* Check requested cell rate and availability of SCD */
			if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
			    && vcc->qos.txtp.min_pcr == 0) {
				PRINTK
				    ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			}

			tcr = atm_pcr_goal(&(vcc->qos.txtp));
			tcra = tcr >= 0 ? tcr : -tcr;

			PRINTK("nicstar%d: target cell rate = %d.\n",
			       card->index, vcc->qos.txtp.max_pcr);

			/* Convert the cell rate into a number of TST slots;
			   a positive goal rounds up, zero means "all the
			   remaining unreserved bandwidth" */
			tmpl =
			    (unsigned long)tcra *(unsigned long)
			    NS_TST_NUM_ENTRIES;
			modl = tmpl % card->max_pcr;

			n = (int)(tmpl / card->max_pcr);
			if (tcr > 0) {
				if (modl > 0)
					n++;
			} else if (tcr == 0) {
				if ((n =
				     (card->tst_free_entries -
				      NS_TST_RESERVED)) <= 0) {
					PRINTK
					    ("nicstar%d: no CBR bandwidth free.\n",
					     card->index);
					clear_bit(ATM_VF_PARTIAL, &vcc->flags);
					clear_bit(ATM_VF_ADDR, &vcc->flags);
					return -EINVAL;
				}
			}

			if (n == 0) {
				printk
				    ("nicstar%d: selected bandwidth < granularity.\n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			}

			if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
				PRINTK
				    ("nicstar%d: not enough free CBR bandwidth.\n",
				     card->index);
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EINVAL;
			} else
				card->tst_free_entries -= n;

			XPRINTK("nicstar%d: writing %d tst entries.\n",
				card->index, n);
			/* Claim a free fixed-rate SCD slot for this vc */
			for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
				if (card->scd2vc[frscdi] == NULL) {
					card->scd2vc[frscdi] = vc;
					break;
				}
			}
			if (frscdi == NS_FRSCD_NUM) {
				PRINTK
				    ("nicstar%d: no SCD available for CBR channel.\n",
				     card->index);
				card->tst_free_entries += n;
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -EBUSY;
			}

			vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;

			scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
			if (scq == NULL) {
PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
				       card->index);
				card->scd2vc[frscdi] = NULL;
				card->tst_free_entries += n;
				clear_bit(ATM_VF_PARTIAL, &vcc->flags);
				clear_bit(ATM_VF_ADDR, &vcc->flags);
				return -ENOMEM;
			}
			vc->scq = scq;
			/* Program the fixed-rate SCD and schedule the vc's
			   TST slots */
			u32d[0] = scq_virt_to_bus(scq, scq->base);
			u32d[1] = (u32) 0x00000000;
			u32d[2] = (u32) 0xffffffff;
			u32d[3] = (u32) 0x00000000;
			ns_write_sram(card, vc->cbr_scd, u32d, 4);

			fill_tst(card, n, vc);
		} else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
			/* UBR traffic shares the single VBR queue SCQ0 */
			vc->cbr_scd = 0x00000000;
			vc->scq = card->scq0;
		}

		if (vcc->qos.txtp.traffic_class != ATM_NONE) {
			vc->tx = 1;
			vc->tx_vcc = vcc;
			vc->tbd_count = 0;
		}
		if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
			u32 status;

			vc->rx = 1;
			vc->rx_vcc = vcc;
			vc->rx_iov = NULL;

			/* Open the connection in hardware */
			if (vcc->qos.aal == ATM_AAL5)
				status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
			else	/* vcc->qos.aal == ATM_AAL0 */
				status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
#ifdef RCQ_SUPPORT
			status |= NS_RCTE_RAWCELLINTEN;
#endif /* RCQ_SUPPORT */
			ns_write_sram(card,
				      NS_RCT +
				      (vpi << card->vcibits | vci) *
				      NS_RCT_ENTRY_SIZE, &status, 1);
		}

	}

	set_bit(ATM_VF_READY, &vcc->flags);
	return 0;
}

/*
 * ATM device close callback: close the RX connection in hardware,
 * recycle any partially reassembled buffers, and (for CBR) drain the
 * fixed-rate SCQ and return its TST slots.
 */
static void ns_close(struct atm_vcc *vcc)
{
	vc_map *vc;
	ns_dev *card;
	u32 data;
	int i;

	vc = vcc->dev_data;
	card = vcc->dev->dev_data;
	PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
	       (int)vcc->vpi, vcc->vci);

	clear_bit(ATM_VF_READY, &vcc->flags);

	if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
		u32 addr;
		unsigned long flags;

		addr =
		    NS_RCT +
		    (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
		spin_lock_irqsave(&card->res_lock, flags);
		while (CMD_BUSY(card)) ;
		writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
		       card->membase + CMD);
		spin_unlock_irqrestore(&card->res_lock, flags);

		vc->rx = 0;
		if (vc->rx_iov != NULL) {
			struct sk_buff *iovb;
			u32 stat;

			stat = readl(card->membase + STAT);
			card->sbfqc = ns_stat_sfbqc_get(stat);
			card->lbfqc = ns_stat_lfbqc_get(stat);

			PRINTK
			    ("nicstar%d: closing a VC with pending rx buffers.\n",
			     card->index);
iovb = vc->rx_iov; recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); NS_PRV_IOVCNT(iovb) = 0; spin_lock_irqsave(&card->int_lock, flags); recycle_iov_buf(card, iovb); spin_unlock_irqrestore(&card->int_lock, flags); vc->rx_iov = NULL; } } if (vcc->qos.txtp.traffic_class != ATM_NONE) { vc->tx = 0; } if (vcc->qos.txtp.traffic_class == ATM_CBR) { unsigned long flags; ns_scqe *scqep; scq_info *scq; scq = vc->scq; for (;;) { spin_lock_irqsave(&scq->lock, flags); scqep = scq->next; if (scqep == scq->base) scqep = scq->last; else scqep--; if (scqep == scq->tail) { spin_unlock_irqrestore(&scq->lock, flags); break; } /* If the last entry is not a TSR, place one in the SCQ in order to be able to completely drain it and then close. */ if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) { ns_scqe tsr; u32 scdi, scqi; u32 data; int index; tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; scqi = scq->next - scq->base; tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); tsr.word_3 = 0x00000000; tsr.word_4 = 0x00000000; *scq->next = tsr; index = (int)scqi; scq->skb[index] = NULL; if (scq->next == scq->last) scq->next = scq->base; else scq->next++; data = scq_virt_to_bus(scq, scq->next); ns_write_sram(card, scq->scd, &data, 1); } spin_unlock_irqrestore(&scq->lock, flags); schedule(); } /* Free all TST entries */ data = NS_TST_OPCODE_VARIABLE; for (i = 0; i < NS_TST_NUM_ENTRIES; i++) { if (card->tste2vc[i] == vc) { ns_write_sram(card, card->tst_addr + i, &data, 1); card->tste2vc[i] = NULL; card->tst_free_entries++; } } card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; free_scq(card, vc->scq, vcc); } /* remove all references to vcc before deleting it */ if (vcc->qos.txtp.traffic_class != ATM_NONE) { unsigned long flags; scq_info *scq = card->scq0; spin_lock_irqsave(&scq->lock, flags); for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { ATM_SKB(scq->skb[i])->vcc = 
NULL; atm_return(vcc, scq->skb[i]->truesize); PRINTK ("nicstar: deleted pending vcc mapping\n"); } } spin_unlock_irqrestore(&scq->lock, flags); } vcc->dev_data = NULL; clear_bit(ATM_VF_PARTIAL, &vcc->flags); clear_bit(ATM_VF_ADDR, &vcc->flags); #ifdef RX_DEBUG { u32 stat, cfg; stat = readl(card->membase + STAT); cfg = readl(card->membase + CFG); printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); printk ("TSQ: base = 0x%p next = 0x%p last = 0x%p TSQT = 0x%08X \n", card->tsq.base, card->tsq.next, card->tsq.last, readl(card->membase + TSQT)); printk ("RSQ: base = 0x%p next = 0x%p last = 0x%p RSQT = 0x%08X \n", card->rsq.base, card->rsq.next, card->rsq.last, readl(card->membase + RSQT)); printk("Empty free buffer queue interrupt %s \n", card->efbie ? "enabled" : "disabled"); printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", ns_stat_sfbqc_get(stat), card->sbpool.count, ns_stat_lfbqc_get(stat), card->lbpool.count); printk("hbpool.count = %d iovpool.count = %d \n", card->hbpool.count, card->iovpool.count); } #endif /* RX_DEBUG */ } static void fill_tst(ns_dev * card, int n, vc_map * vc) { u32 new_tst; unsigned long cl; int e, r; u32 data; /* It would be very complicated to keep the two TSTs synchronized while assuring that writes are only made to the inactive TST. So, for now I will use only one TST. If problems occur, I will change this again */ new_tst = card->tst_addr; /* Fill procedure */ for (e = 0; e < NS_TST_NUM_ENTRIES; e++) { if (card->tste2vc[e] == NULL) break; } if (e == NS_TST_NUM_ENTRIES) { printk("nicstar%d: No free TST entries found. 
\n", card->index); return; } r = n; cl = NS_TST_NUM_ENTRIES; data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); while (r > 0) { if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { card->tste2vc[e] = vc; ns_write_sram(card, new_tst + e, &data, 1); cl -= NS_TST_NUM_ENTRIES; r--; } if (++e == NS_TST_NUM_ENTRIES) { e = 0; } cl += n; } /* End of fill procedure */ data = ns_tste_make(NS_TST_OPCODE_END, new_tst); ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); card->tst_addr = new_tst; } static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) { ns_dev *card; vc_map *vc; scq_info *scq; unsigned long buflen; ns_scqe scqe; u32 flags; /* TBD flags, not CPU flags */ card = vcc->dev->dev_data; TXPRINTK("nicstar%d: ns_send() called.\n", card->index); if ((vc = (vc_map *) vcc->dev_data) == NULL) { printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (!vc->tx) { printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (skb_shinfo(skb)->nr_frags != 0) { printk("nicstar%d: No scatter-gather yet.\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } ATM_SKB(skb)->vcc = vcc; NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data, skb->len, PCI_DMA_TODEVICE); if (vcc->qos.aal == ATM_AAL5) { buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ flags = NS_TBD_AAL5; scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb)); scqe.word_3 = cpu_to_le32(skb->len); scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, ATM_SKB(skb)-> atm_options & ATM_ATMOPT_CLP 
? 1 : 0); flags |= NS_TBD_EOPDU; } else { /* (vcc->qos.aal == ATM_AAL0) */ buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ flags = NS_TBD_AAL0; scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER); scqe.word_3 = cpu_to_le32(0x00000000); if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ flags |= NS_TBD_EOPDU; scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); /* Force the VPI/VCI to be the same as in VCC struct */ scqe.word_4 |= cpu_to_le32((((u32) vcc-> vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc-> vci) << NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK); } if (vcc->qos.txtp.traffic_class == ATM_CBR) { scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); scq = ((vc_map *) vcc->dev_data)->scq; } else { scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); scq = card->scq0; } if (push_scqe(card, vc, scq, &scqe, skb) != 0) { atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EIO; } atomic_inc(&vcc->stats->tx); return 0; } static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, struct sk_buff *skb) { unsigned long flags; ns_scqe tsr; u32 scdi, scqi; int scq_is_vbr; u32 data; int index; spin_lock_irqsave(&scq->lock, flags); while (scq->tail == scq->next) { if (in_interrupt()) { spin_unlock_irqrestore(&scq->lock, flags); printk("nicstar%d: Error pushing TBD.\n", card->index); return 1; } scq->full = 1; spin_unlock_irqrestore(&scq->lock, flags); interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); spin_lock_irqsave(&scq->lock, flags); if (scq->full) { spin_unlock_irqrestore(&scq->lock, flags); printk("nicstar%d: Timeout pushing TBD.\n", card->index); return 1; } } *scq->next = *tbd; index = (int)(scq->next - scq->base); scq->skb[index] = skb; XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n", card->index, skb, index); XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), le32_to_cpu(tbd->word_3), 
le32_to_cpu(tbd->word_4), scq->next); if (scq->next == scq->last) scq->next = scq->base; else scq->next++; vc->tbd_count++; if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) { scq->tbd_count++; scq_is_vbr = 1; } else scq_is_vbr = 0; if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ) { int has_run = 0; while (scq->tail == scq->next) { if (in_interrupt()) { data = scq_virt_to_bus(scq, scq->next); ns_write_sram(card, scq->scd, &data, 1); spin_unlock_irqrestore(&scq->lock, flags); printk("nicstar%d: Error pushing TSR.\n", card->index); return 0; } scq->full = 1; if (has_run++) break; spin_unlock_irqrestore(&scq->lock, flags); interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); spin_lock_irqsave(&scq->lock, flags); } if (!scq->full) { tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); if (scq_is_vbr) scdi = NS_TSR_SCDISVBR; else scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; scqi = scq->next - scq->base; tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); tsr.word_3 = 0x00000000; tsr.word_4 = 0x00000000; *scq->next = tsr; index = (int)scqi; scq->skb[index] = NULL; XPRINTK ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n", card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4), scq->next); if (scq->next == scq->last) scq->next = scq->base; else scq->next++; vc->tbd_count = 0; scq->tbd_count = 0; } else PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index); } data = scq_virt_to_bus(scq, scq->next); ns_write_sram(card, scq->scd, &data, 1); spin_unlock_irqrestore(&scq->lock, flags); return 0; } static void process_tsq(ns_dev * card) { u32 scdi; scq_info *scq; ns_tsi *previous = NULL, *one_ahead, *two_ahead; int serviced_entries; /* flag indicating at least on entry was serviced */ serviced_entries = 0; if (card->tsq.next == card->tsq.last) one_ahead = card->tsq.base; else one_ahead = card->tsq.next + 1; if (one_ahead == card->tsq.last) two_ahead = card->tsq.base; else 
two_ahead = one_ahead + 1; while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || !ns_tsi_isempty(two_ahead)) /* At most two empty, as stated in the 77201 errata */ { serviced_entries = 1; /* Skip the one or two possible empty entries */ while (ns_tsi_isempty(card->tsq.next)) { if (card->tsq.next == card->tsq.last) card->tsq.next = card->tsq.base; else card->tsq.next++; } if (!ns_tsi_tmrof(card->tsq.next)) { scdi = ns_tsi_getscdindex(card->tsq.next); if (scdi == NS_TSI_SCDISVBR) scq = card->scq0; else { if (card->scd2vc[scdi] == NULL) { printk ("nicstar%d: could not find VC from SCD index.\n", card->index); ns_tsi_init(card->tsq.next); return; } scq = card->scd2vc[scdi]->scq; } drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); scq->full = 0; wake_up_interruptible(&(scq->scqfull_waitq)); } ns_tsi_init(card->tsq.next); previous = card->tsq.next; if (card->tsq.next == card->tsq.last) card->tsq.next = card->tsq.base; else card->tsq.next++; if (card->tsq.next == card->tsq.last) one_ahead = card->tsq.base; else one_ahead = card->tsq.next + 1; if (one_ahead == card->tsq.last) two_ahead = card->tsq.base; else two_ahead = one_ahead + 1; } if (serviced_entries) writel(PTR_DIFF(previous, card->tsq.base), card->membase + TSQH); } static void drain_scq(ns_dev * card, scq_info * scq, int pos) { struct atm_vcc *vcc; struct sk_buff *skb; int i; unsigned long flags; XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n", card->index, scq, pos); if (pos >= scq->num_entries) { printk("nicstar%d: Bad index on drain_scq().\n", card->index); return; } spin_lock_irqsave(&scq->lock, flags); i = (int)(scq->tail - scq->base); if (++i == scq->num_entries) i = 0; while (i != pos) { skb = scq->skb[i]; XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n", card->index, skb, i); if (skb != NULL) { pci_unmap_single(card->pcidev, NS_PRV_DMA(skb), skb->len, PCI_DMA_TODEVICE); vcc = ATM_SKB(skb)->vcc; if (vcc && vcc->pop != NULL) { vcc->pop(vcc, skb); } else { 
dev_kfree_skb_irq(skb); } scq->skb[i] = NULL; } if (++i == scq->num_entries) i = 0; } scq->tail = scq->base + pos; spin_unlock_irqrestore(&scq->lock, flags); } static void process_rsq(ns_dev * card) { ns_rsqe *previous; if (!ns_rsqe_valid(card->rsq.next)) return; do { dequeue_rx(card, card->rsq.next); ns_rsqe_init(card->rsq.next); previous = card->rsq.next; if (card->rsq.next == card->rsq.last) card->rsq.next = card->rsq.base; else card->rsq.next++; } while (ns_rsqe_valid(card->rsq.next)); writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH); } static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe) { u32 vpi, vci; vc_map *vc; struct sk_buff *iovb; struct iovec *iov; struct atm_vcc *vcc; struct sk_buff *skb; unsigned short aal5_len; int len; u32 stat; u32 id; stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); id = le32_to_cpu(rsqe->buffer_handle); skb = idr_find(&card->idr, id); if (!skb) { RXPRINTK(KERN_ERR "nicstar%d: idr_find() failed!\n", card->index); return; } idr_remove(&card->idr, id); pci_dma_sync_single_for_cpu(card->pcidev, NS_PRV_DMA(skb), (NS_PRV_BUFTYPE(skb) == BUF_SM ? NS_SMSKBSIZE : NS_LGSKBSIZE), PCI_DMA_FROMDEVICE); pci_unmap_single(card->pcidev, NS_PRV_DMA(skb), (NS_PRV_BUFTYPE(skb) == BUF_SM ? 
NS_SMSKBSIZE : NS_LGSKBSIZE), PCI_DMA_FROMDEVICE); vpi = ns_rsqe_vpi(rsqe); vci = ns_rsqe_vci(rsqe); if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) { printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", card->index, vpi, vci); recycle_rx_buf(card, skb); return; } vc = &(card->vcmap[vpi << card->vcibits | vci]); if (!vc->rx) { RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", card->index, vpi, vci); recycle_rx_buf(card, skb); return; } vcc = vc->rx_vcc; if (vcc->qos.aal == ATM_AAL0) { struct sk_buff *sb; unsigned char *cell; int i; cell = skb->data; for (i = ns_rsqe_cellcount(rsqe); i; i--) { if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) { printk ("nicstar%d: Can't allocate buffers for aal0.\n", card->index); atomic_add(i, &vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK ("nicstar%d: atm_charge() dropped aal0 packets.\n", card->index); atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ dev_kfree_skb_any(sb); break; } /* Rebuild the header */ *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | (ns_rsqe_clp(rsqe) ? 
0x00000001 : 0x00000000); if (i == 1 && ns_rsqe_eopdu(rsqe)) *((u32 *) sb->data) |= 0x00000002; skb_put(sb, NS_AAL0_HEADER); memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); skb_put(sb, ATM_CELL_PAYLOAD); ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); atomic_inc(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } recycle_rx_buf(card, skb); return; } /* To reach this point, the AAL layer can only be AAL5 */ if ((iovb = vc->rx_iov) == NULL) { iovb = skb_dequeue(&(card->iovpool.queue)); if (iovb == NULL) { /* No buffers in the queue */ iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); if (iovb == NULL) { printk("nicstar%d: Out of iovec buffers.\n", card->index); atomic_inc(&vcc->stats->rx_drop); recycle_rx_buf(card, skb); return; } NS_PRV_BUFTYPE(iovb) = BUF_NONE; } else if (--card->iovpool.count < card->iovnr.min) { struct sk_buff *new_iovb; if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) { NS_PRV_BUFTYPE(iovb) = BUF_NONE; skb_queue_tail(&card->iovpool.queue, new_iovb); card->iovpool.count++; } } vc->rx_iov = iovb; NS_PRV_IOVCNT(iovb) = 0; iovb->len = 0; iovb->data = iovb->head; skb_reset_tail_pointer(iovb); /* IMPORTANT: a pointer to the sk_buff containing the small or large buffer is stored as iovec base, NOT a pointer to the small or large buffer itself. 
*/ } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { printk("nicstar%d: received too big AAL5 SDU.\n", card->index); atomic_inc(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_MAX_IOVECS); NS_PRV_IOVCNT(iovb) = 0; iovb->len = 0; iovb->data = iovb->head; skb_reset_tail_pointer(iovb); } iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++]; iov->iov_base = (void *)skb; iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; iovb->len += iov->iov_len; #ifdef EXTRA_DEBUG if (NS_PRV_IOVCNT(iovb) == 1) { if (NS_PRV_BUFTYPE(skb) != BUF_SM) { printk ("nicstar%d: Expected a small buffer, and this is not one.\n", card->index); which_list(card, skb); atomic_inc(&vcc->stats->rx_err); recycle_rx_buf(card, skb); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } } else { /* NS_PRV_IOVCNT(iovb) >= 2 */ if (NS_PRV_BUFTYPE(skb) != BUF_LG) { printk ("nicstar%d: Expected a large buffer, and this is not one.\n", card->index); which_list(card, skb); atomic_inc(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } } #endif /* EXTRA_DEBUG */ if (ns_rsqe_eopdu(rsqe)) { /* This works correctly regardless of the endianness of the host */ unsigned char *L1L2 = (unsigned char *) (skb->data + iov->iov_len - 6); aal5_len = L1L2[0] << 8 | L1L2[1]; len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; if (ns_rsqe_crcerr(rsqe) || len + 8 > iovb->len || len + (47 + 8) < iovb->len) { printk("nicstar%d: AAL5 CRC error", card->index); if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) printk(" - PDU size mismatch.\n"); else printk(".\n"); atomic_inc(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } /* By this point we (hopefully) have a complete SDU without errors. 
*/ if (NS_PRV_IOVCNT(iovb) == 1) { /* Just a small buffer */ /* skb points to a small buffer */ if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); atomic_inc(&vcc->stats->rx_drop); } else { skb_put(skb, len); dequeue_sm_buf(card, skb); #ifdef NS_USE_DESTRUCTORS skb->destructor = ns_sb_destructor; #endif /* NS_USE_DESTRUCTORS */ ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); } } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */ struct sk_buff *sb; sb = (struct sk_buff *)(iov - 1)->iov_base; /* skb points to a large buffer */ if (len <= NS_SMBUFSIZE) { if (!atm_charge(vcc, sb->truesize)) { push_rxbufs(card, sb); atomic_inc(&vcc->stats->rx_drop); } else { skb_put(sb, len); dequeue_sm_buf(card, sb); #ifdef NS_USE_DESTRUCTORS sb->destructor = ns_sb_destructor; #endif /* NS_USE_DESTRUCTORS */ ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); atomic_inc(&vcc->stats->rx); } push_rxbufs(card, skb); } else { /* len > NS_SMBUFSIZE, the usual case */ if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); atomic_inc(&vcc->stats->rx_drop); } else { dequeue_lg_buf(card, skb); #ifdef NS_USE_DESTRUCTORS skb->destructor = ns_lb_destructor; #endif /* NS_USE_DESTRUCTORS */ skb_push(skb, NS_SMBUFSIZE); skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE); skb_put(skb, len - NS_SMBUFSIZE); ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); } push_rxbufs(card, sb); } } else { /* Must push a huge buffer */ struct sk_buff *hb, *sb, *lb; int remaining, tocopy; int j; hb = skb_dequeue(&(card->hbpool.queue)); if (hb == NULL) { /* No buffers in the queue */ hb = dev_alloc_skb(NS_HBUFSIZE); if (hb == NULL) { printk ("nicstar%d: Out of huge buffers.\n", card->index); atomic_inc(&vcc->stats->rx_drop); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } else 
if (card->hbpool.count < card->hbnr.min) { struct sk_buff *new_hb; if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { skb_queue_tail(&card->hbpool. queue, new_hb); card->hbpool.count++; } } NS_PRV_BUFTYPE(hb) = BUF_NONE; } else if (--card->hbpool.count < card->hbnr.min) { struct sk_buff *new_hb; if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { NS_PRV_BUFTYPE(new_hb) = BUF_NONE; skb_queue_tail(&card->hbpool.queue, new_hb); card->hbpool.count++; } if (card->hbpool.count < card->hbnr.min) { if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { NS_PRV_BUFTYPE(new_hb) = BUF_NONE; skb_queue_tail(&card->hbpool. queue, new_hb); card->hbpool.count++; } } } iov = (struct iovec *)iovb->data; if (!atm_charge(vcc, hb->truesize)) { recycle_iovec_rx_bufs(card, iov, NS_PRV_IOVCNT(iovb)); if (card->hbpool.count < card->hbnr.max) { skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; } else dev_kfree_skb_any(hb); atomic_inc(&vcc->stats->rx_drop); } else { /* Copy the small buffer to the huge buffer */ sb = (struct sk_buff *)iov->iov_base; skb_copy_from_linear_data(sb, hb->data, iov->iov_len); skb_put(hb, iov->iov_len); remaining = len - iov->iov_len; iov++; /* Free the small buffer */ push_rxbufs(card, sb); /* Copy all large buffers to the huge buffer and free them */ for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) { lb = (struct sk_buff *)iov->iov_base; tocopy = min_t(int, remaining, iov->iov_len); skb_copy_from_linear_data(lb, skb_tail_pointer (hb), tocopy); skb_put(hb, tocopy); iov++; remaining -= tocopy; push_rxbufs(card, lb); } #ifdef EXTRA_DEBUG if (remaining != 0 || hb->len != len) printk ("nicstar%d: Huge buffer len mismatch.\n", card->index); #endif /* EXTRA_DEBUG */ ATM_SKB(hb)->vcc = vcc; #ifdef NS_USE_DESTRUCTORS hb->destructor = ns_hb_destructor; #endif /* NS_USE_DESTRUCTORS */ __net_timestamp(hb); vcc->push(vcc, hb); atomic_inc(&vcc->stats->rx); } } vc->rx_iov = NULL; recycle_iov_buf(card, iovb); } } #ifdef NS_USE_DESTRUCTORS static void 
ns_sb_destructor(struct sk_buff *sb) { ns_dev *card; u32 stat; card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data; stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); do { sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); if (sb == NULL) break; NS_PRV_BUFTYPE(sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } while (card->sbfqc < card->sbnr.min); } static void ns_lb_destructor(struct sk_buff *lb) { ns_dev *card; u32 stat; card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data; stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); do { lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); if (lb == NULL) break; NS_PRV_BUFTYPE(lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); } while (card->lbfqc < card->lbnr.min); } static void ns_hb_destructor(struct sk_buff *hb) { ns_dev *card; card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data; while (card->hbpool.count < card->hbnr.init) { hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); if (hb == NULL) break; NS_PRV_BUFTYPE(hb) = BUF_NONE; skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; } } #endif /* NS_USE_DESTRUCTORS */ static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb) { if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) { printk("nicstar%d: What kind of rx buffer is this?\n", card->index); dev_kfree_skb_any(skb); } else push_rxbufs(card, skb); } static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count) { while (count-- > 0) recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base); } static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb) { if (card->iovpool.count < card->iovnr.max) { skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; } else dev_kfree_skb_any(iovb); } static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb) { 
skb_unlink(sb, &card->sbpool.queue); #ifdef NS_USE_DESTRUCTORS if (card->sbfqc < card->sbnr.min) #else if (card->sbfqc < card->sbnr.init) { struct sk_buff *new_sb; if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, new_sb); skb_reserve(new_sb, NS_AAL0_HEADER); push_rxbufs(card, new_sb); } } if (card->sbfqc < card->sbnr.init) #endif /* NS_USE_DESTRUCTORS */ { struct sk_buff *new_sb; if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, new_sb); skb_reserve(new_sb, NS_AAL0_HEADER); push_rxbufs(card, new_sb); } } } static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb) { skb_unlink(lb, &card->lbpool.queue); #ifdef NS_USE_DESTRUCTORS if (card->lbfqc < card->lbnr.min) #else if (card->lbfqc < card->lbnr.init) { struct sk_buff *new_lb; if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, new_lb); skb_reserve(new_lb, NS_SMBUFSIZE); push_rxbufs(card, new_lb); } } if (card->lbfqc < card->lbnr.init) #endif /* NS_USE_DESTRUCTORS */ { struct sk_buff *new_lb; if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { NS_PRV_BUFTYPE(new_lb) = BUF_LG; skb_queue_tail(&card->lbpool.queue, new_lb); skb_reserve(new_lb, NS_SMBUFSIZE); push_rxbufs(card, new_lb); } } } static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page) { u32 stat; ns_dev *card; int left; left = (int)*pos; card = (ns_dev *) dev->dev_data; stat = readl(card->membase + STAT); if (!left--) return sprintf(page, "Pool count min init max \n"); if (!left--) return sprintf(page, "Small %5d %5d %5d %5d \n", ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init, card->sbnr.max); if (!left--) return sprintf(page, "Large %5d %5d %5d %5d \n", ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init, card->lbnr.max); if (!left--) return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count, 
card->hbnr.min, card->hbnr.init, card->hbnr.max); if (!left--) return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count, card->iovnr.min, card->iovnr.init, card->iovnr.max); if (!left--) { int retval; retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt); card->intcnt = 0; return retval; } #if 0 /* Dump 25.6 Mbps PHY registers */ /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it here just in case it's needed for debugging. */ if (card->max_pcr == ATM_25_PCR && !left--) { u32 phy_regs[4]; u32 i; for (i = 0; i < 4; i++) { while (CMD_BUSY(card)) ; writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD); while (CMD_BUSY(card)) ; phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; } return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]); } #endif /* 0 - Dump 25.6 Mbps PHY registers */ #if 0 /* Dump TST */ if (left-- < NS_TST_NUM_ENTRIES) { if (card->tste2vc[left + 1] == NULL) return sprintf(page, "%5d - VBR/UBR \n", left + 1); else return sprintf(page, "%5d - %d %d \n", left + 1, card->tste2vc[left + 1]->tx_vcc->vpi, card->tste2vc[left + 1]->tx_vcc->vci); } #endif /* 0 */ return 0; } static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg) { ns_dev *card; pool_levels pl; long btype; unsigned long flags; card = dev->dev_data; switch (cmd) { case NS_GETPSTAT: if (get_user (pl.buftype, &((pool_levels __user *) arg)->buftype)) return -EFAULT; switch (pl.buftype) { case NS_BUFTYPE_SMALL: pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT)); pl.level.min = card->sbnr.min; pl.level.init = card->sbnr.init; pl.level.max = card->sbnr.max; break; case NS_BUFTYPE_LARGE: pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT)); pl.level.min = card->lbnr.min; pl.level.init = card->lbnr.init; pl.level.max = card->lbnr.max; break; case NS_BUFTYPE_HUGE: pl.count = card->hbpool.count; pl.level.min = card->hbnr.min; pl.level.init = 
card->hbnr.init; pl.level.max = card->hbnr.max; break; case NS_BUFTYPE_IOVEC: pl.count = card->iovpool.count; pl.level.min = card->iovnr.min; pl.level.init = card->iovnr.init; pl.level.max = card->iovnr.max; break; default: return -ENOIOCTLCMD; } if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) return (sizeof(pl)); else return -EFAULT; case NS_SETBUFLEV: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) return -EFAULT; if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max) return -EINVAL; if (pl.level.min == 0) return -EINVAL; switch (pl.buftype) { case NS_BUFTYPE_SMALL: if (pl.level.max > TOP_SB) return -EINVAL; card->sbnr.min = pl.level.min; card->sbnr.init = pl.level.init; card->sbnr.max = pl.level.max; break; case NS_BUFTYPE_LARGE: if (pl.level.max > TOP_LB) return -EINVAL; card->lbnr.min = pl.level.min; card->lbnr.init = pl.level.init; card->lbnr.max = pl.level.max; break; case NS_BUFTYPE_HUGE: if (pl.level.max > TOP_HB) return -EINVAL; card->hbnr.min = pl.level.min; card->hbnr.init = pl.level.init; card->hbnr.max = pl.level.max; break; case NS_BUFTYPE_IOVEC: if (pl.level.max > TOP_IOVB) return -EINVAL; card->iovnr.min = pl.level.min; card->iovnr.init = pl.level.init; card->iovnr.max = pl.level.max; break; default: return -EINVAL; } return 0; case NS_ADJBUFLEV: if (!capable(CAP_NET_ADMIN)) return -EPERM; btype = (long)arg; /* a long is the same size as a pointer or bigger */ switch (btype) { case NS_BUFTYPE_SMALL: while (card->sbfqc < card->sbnr.init) { struct sk_buff *sb; sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); if (sb == NULL) return -ENOMEM; NS_PRV_BUFTYPE(sb) = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } break; case NS_BUFTYPE_LARGE: while (card->lbfqc < card->lbnr.init) { struct sk_buff *lb; lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); if (lb == NULL) return -ENOMEM; NS_PRV_BUFTYPE(lb) = BUF_LG; 
skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); } break; case NS_BUFTYPE_HUGE: while (card->hbpool.count > card->hbnr.init) { struct sk_buff *hb; spin_lock_irqsave(&card->int_lock, flags); hb = skb_dequeue(&card->hbpool.queue); card->hbpool.count--; spin_unlock_irqrestore(&card->int_lock, flags); if (hb == NULL) printk ("nicstar%d: huge buffer count inconsistent.\n", card->index); else dev_kfree_skb_any(hb); } while (card->hbpool.count < card->hbnr.init) { struct sk_buff *hb; hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); if (hb == NULL) return -ENOMEM; NS_PRV_BUFTYPE(hb) = BUF_NONE; spin_lock_irqsave(&card->int_lock, flags); skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; spin_unlock_irqrestore(&card->int_lock, flags); } break; case NS_BUFTYPE_IOVEC: while (card->iovpool.count > card->iovnr.init) { struct sk_buff *iovb; spin_lock_irqsave(&card->int_lock, flags); iovb = skb_dequeue(&card->iovpool.queue); card->iovpool.count--; spin_unlock_irqrestore(&card->int_lock, flags); if (iovb == NULL) printk ("nicstar%d: iovec buffer count inconsistent.\n", card->index); else dev_kfree_skb_any(iovb); } while (card->iovpool.count < card->iovnr.init) { struct sk_buff *iovb; iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); if (iovb == NULL) return -ENOMEM; NS_PRV_BUFTYPE(iovb) = BUF_NONE; spin_lock_irqsave(&card->int_lock, flags); skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; spin_unlock_irqrestore(&card->int_lock, flags); } break; default: return -EINVAL; } return 0; default: if (dev->phy && dev->phy->ioctl) { return dev->phy->ioctl(dev, cmd, arg); } else { printk("nicstar%d: %s == NULL \n", card->index, dev->phy ? 
"dev->phy->ioctl" : "dev->phy"); return -ENOIOCTLCMD; } } } #ifdef EXTRA_DEBUG static void which_list(ns_dev * card, struct sk_buff *skb) { printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb)); } #endif /* EXTRA_DEBUG */ static void ns_poll(unsigned long arg) { int i; ns_dev *card; unsigned long flags; u32 stat_r, stat_w; PRINTK("nicstar: Entering ns_poll().\n"); for (i = 0; i < num_cards; i++) { card = cards[i]; if (spin_is_locked(&card->int_lock)) { /* Probably it isn't worth spinning */ continue; } spin_lock_irqsave(&card->int_lock, flags); stat_w = 0; stat_r = readl(card->membase + STAT); if (stat_r & NS_STAT_TSIF) stat_w |= NS_STAT_TSIF; if (stat_r & NS_STAT_EOPDU) stat_w |= NS_STAT_EOPDU; process_tsq(card); process_rsq(card); writel(stat_w, card->membase + STAT); spin_unlock_irqrestore(&card->int_lock, flags); } mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); PRINTK("nicstar: Leaving ns_poll().\n"); } static int ns_parse_mac(char *mac, unsigned char *esi) { int i, j; short byte1, byte0; if (mac == NULL || esi == NULL) return -1; j = 0; for (i = 0; i < 6; i++) { if ((byte1 = hex_to_bin(mac[j++])) < 0) return -1; if ((byte0 = hex_to_bin(mac[j++])) < 0) return -1; esi[i] = (unsigned char)(byte1 * 16 + byte0); if (i < 5) { if (mac[j++] != ':') return -1; } } return 0; } static void ns_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr) { ns_dev *card; unsigned long flags; card = dev->dev_data; spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel((u32) value, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); } static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) { ns_dev *card; unsigned long flags; u32 data; card = dev->dev_data; spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)) ; writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), card->membase + CMD); while 
(CMD_BUSY(card)) ; data = readl(card->membase + DR0) & 0x000000FF; spin_unlock_irqrestore(&card->res_lock, flags); return (unsigned char)data; } module_init(nicstar_init); module_exit(nicstar_cleanup);
gpl-2.0
npeacock/android_kernel_mediatek_mt6575
arch/x86/kernel/i8259.c
2465
10378
#include <linux/linkage.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/timex.h> #include <linux/random.h> #include <linux/init.h> #include <linux/kernel_stat.h> #include <linux/syscore_ops.h> #include <linux/bitops.h> #include <linux/acpi.h> #include <linux/io.h> #include <linux/delay.h> #include <asm/atomic.h> #include <asm/system.h> #include <asm/timer.h> #include <asm/hw_irq.h> #include <asm/pgtable.h> #include <asm/desc.h> #include <asm/apic.h> #include <asm/i8259.h> /* * This is the 'legacy' 8259A Programmable Interrupt Controller, * present in the majority of PC/AT boxes. * plus some generic x86 specific things if generic specifics makes * any sense at all. */ static void init_8259A(int auto_eoi); static int i8259A_auto_eoi; DEFINE_RAW_SPINLOCK(i8259A_lock); /* * 8259A PIC functions to handle ISA devices: */ /* * This contains the irq mask for both 8259A irq controllers, */ unsigned int cached_irq_mask = 0xffff; /* * Not all IRQs can be routed through the IO-APIC, eg. on certain (older) * boards the timer interrupt is not really connected to any IO-APIC pin, * it's fed to the master 8259A's IR0 line only. * * Any '1' bit in this mask means the IRQ is routed through the IO-APIC. * this 'mixed mode' IRQ handling costs nothing because it's only used * at IRQ setup time. 
*/ unsigned long io_apic_irqs; static void mask_8259A_irq(unsigned int irq) { unsigned int mask = 1 << irq; unsigned long flags; raw_spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask |= mask; if (irq & 8) outb(cached_slave_mask, PIC_SLAVE_IMR); else outb(cached_master_mask, PIC_MASTER_IMR); raw_spin_unlock_irqrestore(&i8259A_lock, flags); } static void disable_8259A_irq(struct irq_data *data) { mask_8259A_irq(data->irq); } static void unmask_8259A_irq(unsigned int irq) { unsigned int mask = ~(1 << irq); unsigned long flags; raw_spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask &= mask; if (irq & 8) outb(cached_slave_mask, PIC_SLAVE_IMR); else outb(cached_master_mask, PIC_MASTER_IMR); raw_spin_unlock_irqrestore(&i8259A_lock, flags); } static void enable_8259A_irq(struct irq_data *data) { unmask_8259A_irq(data->irq); } static int i8259A_irq_pending(unsigned int irq) { unsigned int mask = 1<<irq; unsigned long flags; int ret; raw_spin_lock_irqsave(&i8259A_lock, flags); if (irq < 8) ret = inb(PIC_MASTER_CMD) & mask; else ret = inb(PIC_SLAVE_CMD) & (mask >> 8); raw_spin_unlock_irqrestore(&i8259A_lock, flags); return ret; } static void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); io_apic_irqs &= ~(1<<irq); irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, i8259A_chip.name); enable_irq(irq); } /* * This function assumes to be called rarely. Switching between * 8259A registers is slow. * This has to be protected by the irq controller spinlock * before being called. */ static inline int i8259A_irq_real(unsigned int irq) { int value; int irqmask = 1<<irq; if (irq < 8) { outb(0x0B, PIC_MASTER_CMD); /* ISR register */ value = inb(PIC_MASTER_CMD) & irqmask; outb(0x0A, PIC_MASTER_CMD); /* back to the IRR register */ return value; } outb(0x0B, PIC_SLAVE_CMD); /* ISR register */ value = inb(PIC_SLAVE_CMD) & (irqmask >> 8); outb(0x0A, PIC_SLAVE_CMD); /* back to the IRR register */ return value; } /* * Careful! 
The 8259A is a fragile beast, it pretty * much _has_ to be done exactly like this (mask it * first, _then_ send the EOI, and the order of EOI * to the two 8259s is important! */ static void mask_and_ack_8259A(struct irq_data *data) { unsigned int irq = data->irq; unsigned int irqmask = 1 << irq; unsigned long flags; raw_spin_lock_irqsave(&i8259A_lock, flags); /* * Lightweight spurious IRQ detection. We do not want * to overdo spurious IRQ handling - it's usually a sign * of hardware problems, so we only do the checks we can * do without slowing down good hardware unnecessarily. * * Note that IRQ7 and IRQ15 (the two spurious IRQs * usually resulting from the 8259A-1|2 PICs) occur * even if the IRQ is masked in the 8259A. Thus we * can check spurious 8259A IRQs without doing the * quite slow i8259A_irq_real() call for every IRQ. * This does not cover 100% of spurious interrupts, * but should be enough to warn the user that there * is something bad going on ... */ if (cached_irq_mask & irqmask) goto spurious_8259A_irq; cached_irq_mask |= irqmask; handle_real_irq: if (irq & 8) { inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */ outb(cached_slave_mask, PIC_SLAVE_IMR); /* 'Specific EOI' to slave */ outb(0x60+(irq&7), PIC_SLAVE_CMD); /* 'Specific EOI' to master-IRQ2 */ outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); } else { inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */ outb(cached_master_mask, PIC_MASTER_IMR); outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ } raw_spin_unlock_irqrestore(&i8259A_lock, flags); return; spurious_8259A_irq: /* * this is the slow path - should happen rarely. */ if (i8259A_irq_real(irq)) /* * oops, the IRQ _is_ in service according to the * 8259A - not spurious, go handle it. */ goto handle_real_irq; { static int spurious_irq_mask; /* * At this point we can be sure the IRQ is spurious, * lets ACK and report it. 
[once per IRQ] */ if (!(spurious_irq_mask & irqmask)) { printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } atomic_inc(&irq_err_count); /* * Theoretically we do not have to handle this IRQ, * but in Linux this does not cause problems and is * simpler for us. */ goto handle_real_irq; } } struct irq_chip i8259A_chip = { .name = "XT-PIC", .irq_mask = disable_8259A_irq, .irq_disable = disable_8259A_irq, .irq_unmask = enable_8259A_irq, .irq_mask_ack = mask_and_ack_8259A, }; static char irq_trigger[2]; /** * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ */ static void restore_ELCR(char *trigger) { outb(trigger[0], 0x4d0); outb(trigger[1], 0x4d1); } static void save_ELCR(char *trigger) { /* IRQ 0,1,2,8,13 are marked as reserved */ trigger[0] = inb(0x4d0) & 0xF8; trigger[1] = inb(0x4d1) & 0xDE; } static void i8259A_resume(void) { init_8259A(i8259A_auto_eoi); restore_ELCR(irq_trigger); } static int i8259A_suspend(void) { save_ELCR(irq_trigger); return 0; } static void i8259A_shutdown(void) { /* Put the i8259A into a quiescent state that * the kernel initialization code can get it * out of. 
*/ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ } static struct syscore_ops i8259_syscore_ops = { .suspend = i8259A_suspend, .resume = i8259A_resume, .shutdown = i8259A_shutdown, }; static void mask_8259A(void) { unsigned long flags; raw_spin_lock_irqsave(&i8259A_lock, flags); outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ raw_spin_unlock_irqrestore(&i8259A_lock, flags); } static void unmask_8259A(void) { unsigned long flags; raw_spin_lock_irqsave(&i8259A_lock, flags); outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ raw_spin_unlock_irqrestore(&i8259A_lock, flags); } static void init_8259A(int auto_eoi) { unsigned long flags; i8259A_auto_eoi = auto_eoi; raw_spin_lock_irqsave(&i8259A_lock, flags); outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ /* * outb_pic - this has to work on a wide range of PC hardware. */ outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 on x86-64, to 0x20-0x27 on i386 */ outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */ outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); if (auto_eoi) /* master does Auto EOI */ outb_pic(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); else /* master expects normal EOI */ outb_pic(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); outb_pic(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ /* ICW2: 8259A-2 IR0-7 mapped to IRQ8_VECTOR */ outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */ outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */ outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); if (auto_eoi) /* * In AEOI mode we just have to mask the interrupt * when acking. 
*/ i8259A_chip.irq_mask_ack = disable_8259A_irq; else i8259A_chip.irq_mask_ack = mask_and_ack_8259A; udelay(100); /* wait for 8259A to initialize */ outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ raw_spin_unlock_irqrestore(&i8259A_lock, flags); } /* * make i8259 a driver so that we can select pic functions at run time. the goal * is to make x86 binary compatible among pc compatible and non-pc compatible * platforms, such as x86 MID. */ static void legacy_pic_noop(void) { }; static void legacy_pic_uint_noop(unsigned int unused) { }; static void legacy_pic_int_noop(int unused) { }; static int legacy_pic_irq_pending_noop(unsigned int irq) { return 0; } struct legacy_pic null_legacy_pic = { .nr_legacy_irqs = 0, .chip = &dummy_irq_chip, .mask = legacy_pic_uint_noop, .unmask = legacy_pic_uint_noop, .mask_all = legacy_pic_noop, .restore_mask = legacy_pic_noop, .init = legacy_pic_int_noop, .irq_pending = legacy_pic_irq_pending_noop, .make_irq = legacy_pic_uint_noop, }; struct legacy_pic default_legacy_pic = { .nr_legacy_irqs = NR_IRQS_LEGACY, .chip = &i8259A_chip, .mask = mask_8259A_irq, .unmask = unmask_8259A_irq, .mask_all = mask_8259A, .restore_mask = unmask_8259A, .init = init_8259A, .irq_pending = i8259A_irq_pending, .make_irq = make_8259A_irq, }; struct legacy_pic *legacy_pic = &default_legacy_pic; static int __init i8259A_init_ops(void) { if (legacy_pic == &default_legacy_pic) register_syscore_ops(&i8259_syscore_ops); return 0; } device_initcall(i8259A_init_ops);
gpl-2.0
Euphoria-OS-Devices/android_kernel_lge_msm8974
net/8021q/vlanproc.c
5025
8675
/****************************************************************************** * vlanproc.c VLAN Module. /proc filesystem interface. * * This module is completely hardware-independent and provides * access to the router using Linux /proc filesystem. * * Author: Ben Greear, <greearb@candelatech.com> coppied from wanproc.c * by: Gene Kozin <genek@compuserve.com> * * Copyright: (c) 1998 Ben Greear * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * ============================================================================ * Jan 20, 1998 Ben Greear Initial Version *****************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include "vlanproc.h" #include "vlan.h" /****** Function Prototypes *************************************************/ /* Methods for preparing data for reading proc entries */ static int vlan_seq_show(struct seq_file *seq, void *v); static void *vlan_seq_start(struct seq_file *seq, loff_t *pos); static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos); static void vlan_seq_stop(struct seq_file *seq, void *); static int vlandev_seq_show(struct seq_file *seq, void *v); /* * Global Data */ /* * Names of the proc directory entries */ static const char name_root[] = "vlan"; static const char name_conf[] = "config"; /* * Structures for interfacing with the /proc filesystem. 
* VLAN creates its own directory /proc/net/vlan with the following * entries: * config device status/configuration * <device> entry for each device */ /* * Generic /proc/net/vlan/<file> file and inode operations */ static const struct seq_operations vlan_seq_ops = { .start = vlan_seq_start, .next = vlan_seq_next, .stop = vlan_seq_stop, .show = vlan_seq_show, }; static int vlan_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &vlan_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations vlan_fops = { .owner = THIS_MODULE, .open = vlan_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; /* * /proc/net/vlan/<device> file and inode operations */ static int vlandev_seq_open(struct inode *inode, struct file *file) { return single_open(file, vlandev_seq_show, PDE(inode)->data); } static const struct file_operations vlandev_fops = { .owner = THIS_MODULE, .open = vlandev_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * Proc filesystem derectory entries. */ /* Strings */ static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = { [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID", [VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID", }; /* * Interface functions */ /* * Clean up /proc/net/vlan entries */ void vlan_proc_cleanup(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); if (vn->proc_vlan_conf) remove_proc_entry(name_conf, vn->proc_vlan_dir); if (vn->proc_vlan_dir) proc_net_remove(net, name_root); /* Dynamically added entries should be cleaned up as their vlan_device * is removed, so we should not have to take care of it here... 
*/ } /* * Create /proc/net/vlan entries */ int __net_init vlan_proc_init(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net); if (!vn->proc_vlan_dir) goto err; vn->proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR, vn->proc_vlan_dir, &vlan_fops); if (!vn->proc_vlan_conf) goto err; return 0; err: pr_err("can't create entry in proc filesystem!\n"); vlan_proc_cleanup(net); return -ENOBUFS; } /* * Add directory entry for VLAN device. */ int vlan_proc_add_dev(struct net_device *vlandev) { struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); vlan->dent = proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR, vn->proc_vlan_dir, &vlandev_fops, vlandev); if (!vlan->dent) return -ENOBUFS; return 0; } /* * Delete directory entry for VLAN device. */ int vlan_proc_rem_dev(struct net_device *vlandev) { struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); /** NOTE: This will consume the memory pointed to by dent, it seems. 
*/ if (vlan_dev_priv(vlandev)->dent) { remove_proc_entry(vlan_dev_priv(vlandev)->dent->name, vn->proc_vlan_dir); vlan_dev_priv(vlandev)->dent = NULL; } return 0; } /****** Proc filesystem entry points ****************************************/ /* * The following few functions build the content of /proc/net/vlan/config */ /* start read of /proc/net/vlan/config */ static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { struct net_device *dev; struct net *net = seq_file_net(seq); loff_t i = 1; rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; for_each_netdev_rcu(net, dev) { if (!is_vlan_dev(dev)) continue; if (i++ == *pos) return dev; } return NULL; } static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net_device *dev; struct net *net = seq_file_net(seq); ++*pos; dev = v; if (v == SEQ_START_TOKEN) dev = net_device_entry(&net->dev_base_head); for_each_netdev_continue_rcu(net, dev) { if (!is_vlan_dev(dev)) continue; return dev; } return NULL; } static void vlan_seq_stop(struct seq_file *seq, void *v) __releases(rcu) { rcu_read_unlock(); } static int vlan_seq_show(struct seq_file *seq, void *v) { struct net *net = seq_file_net(seq); struct vlan_net *vn = net_generic(net, vlan_net_id); if (v == SEQ_START_TOKEN) { const char *nmtype = NULL; seq_puts(seq, "VLAN Dev name | VLAN ID\n"); if (vn->name_type < ARRAY_SIZE(vlan_name_type_str)) nmtype = vlan_name_type_str[vn->name_type]; seq_printf(seq, "Name-Type: %s\n", nmtype ? 
nmtype : "UNKNOWN"); } else { const struct net_device *vlandev = v; const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); seq_printf(seq, "%-15s| %d | %s\n", vlandev->name, vlan->vlan_id, vlan->real_dev->name); } return 0; } static int vlandev_seq_show(struct seq_file *seq, void *offset) { struct net_device *vlandev = (struct net_device *) seq->private; const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats; static const char fmt64[] = "%30s %12llu\n"; int i; if (!is_vlan_dev(vlandev)) return 0; stats = dev_get_stats(vlandev, &temp); seq_printf(seq, "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n", vlandev->name, vlan->vlan_id, (int)(vlan->flags & 1), vlandev->priv_flags); seq_printf(seq, fmt64, "total frames received", stats->rx_packets); seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes); seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast); seq_puts(seq, "\n"); seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); seq_printf(seq, "Device: %s", vlan->real_dev->name); /* now show all PRIORITY mappings relating to this VLAN */ seq_printf(seq, "\nINGRESS priority mappings: " "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n", vlan->ingress_priority_map[0], vlan->ingress_priority_map[1], vlan->ingress_priority_map[2], vlan->ingress_priority_map[3], vlan->ingress_priority_map[4], vlan->ingress_priority_map[5], vlan->ingress_priority_map[6], vlan->ingress_priority_map[7]); seq_printf(seq, " EGRESS priority mappings: "); for (i = 0; i < 16; i++) { const struct vlan_priority_tci_mapping *mp = vlan->egress_priority_map[i]; while (mp) { seq_printf(seq, "%u:%hu ", mp->priority, ((mp->vlan_qos >> 13) & 0x7)); mp = mp->next; } } seq_puts(seq, "\n"); return 0; }
gpl-2.0
zlatinski/p-android-omap-3.4-android-jb-new-ion
crypto/deflate.c
7585
5555
/* * Cryptographic API. * * Deflate algorithm (RFC 1951), implemented here primarily for use * by IPCOMP (RFC 3173 & RFC 2394). * * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * FIXME: deflate transforms will require up to a total of about 436k of kernel * memory on i386 (390k for compression, the rest for decompression), as the * current zlib kernel code uses a worst case pre-allocation system by default. * This needs to be fixed so that the amount of memory required is properly * related to the winbits and memlevel parameters. * * The default winbits of 11 should suit most packets, and it may be something * to configure on a per-tfm basis in the future. * * Currently, compression history is not maintained between tfm calls, as * it is not needed for IPCOMP and keeps the code simpler. It can be * implemented if someone wants it. 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/crypto.h> #include <linux/zlib.h> #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/net.h> #define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION #define DEFLATE_DEF_WINBITS 11 #define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL struct deflate_ctx { struct z_stream_s comp_stream; struct z_stream_s decomp_stream; }; static int deflate_comp_init(struct deflate_ctx *ctx) { int ret = 0; struct z_stream_s *stream = &ctx->comp_stream; stream->workspace = vzalloc(zlib_deflate_workspacesize( -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL)); if (!stream->workspace) { ret = -ENOMEM; goto out; } ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED, -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL, Z_DEFAULT_STRATEGY); if (ret != Z_OK) { ret = -EINVAL; goto out_free; } out: return ret; out_free: vfree(stream->workspace); goto out; } static int deflate_decomp_init(struct deflate_ctx *ctx) { int ret = 0; struct z_stream_s *stream = &ctx->decomp_stream; stream->workspace = vzalloc(zlib_inflate_workspacesize()); if (!stream->workspace) { ret = -ENOMEM; goto out; } ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS); if (ret != Z_OK) { ret = -EINVAL; goto out_free; } out: return ret; out_free: vfree(stream->workspace); goto out; } static void deflate_comp_exit(struct deflate_ctx *ctx) { zlib_deflateEnd(&ctx->comp_stream); vfree(ctx->comp_stream.workspace); } static void deflate_decomp_exit(struct deflate_ctx *ctx) { zlib_inflateEnd(&ctx->decomp_stream); vfree(ctx->decomp_stream.workspace); } static int deflate_init(struct crypto_tfm *tfm) { struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); int ret; ret = deflate_comp_init(ctx); if (ret) goto out; ret = deflate_decomp_init(ctx); if (ret) deflate_comp_exit(ctx); out: return ret; } static void deflate_exit(struct crypto_tfm *tfm) { struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); deflate_comp_exit(ctx); deflate_decomp_exit(ctx); } static int 
deflate_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { int ret = 0; struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); struct z_stream_s *stream = &dctx->comp_stream; ret = zlib_deflateReset(stream); if (ret != Z_OK) { ret = -EINVAL; goto out; } stream->next_in = (u8 *)src; stream->avail_in = slen; stream->next_out = (u8 *)dst; stream->avail_out = *dlen; ret = zlib_deflate(stream, Z_FINISH); if (ret != Z_STREAM_END) { ret = -EINVAL; goto out; } ret = 0; *dlen = stream->total_out; out: return ret; } static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { int ret = 0; struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); struct z_stream_s *stream = &dctx->decomp_stream; ret = zlib_inflateReset(stream); if (ret != Z_OK) { ret = -EINVAL; goto out; } stream->next_in = (u8 *)src; stream->avail_in = slen; stream->next_out = (u8 *)dst; stream->avail_out = *dlen; ret = zlib_inflate(stream, Z_SYNC_FLUSH); /* * Work around a bug in zlib, which sometimes wants to taste an extra * byte when being used in the (undocumented) raw deflate mode. * (From USAGI). 
*/ if (ret == Z_OK && !stream->avail_in && stream->avail_out) { u8 zerostuff = 0; stream->next_in = &zerostuff; stream->avail_in = 1; ret = zlib_inflate(stream, Z_FINISH); } if (ret != Z_STREAM_END) { ret = -EINVAL; goto out; } ret = 0; *dlen = stream->total_out; out: return ret; } static struct crypto_alg alg = { .cra_name = "deflate", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct deflate_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_init = deflate_init, .cra_exit = deflate_exit, .cra_u = { .compress = { .coa_compress = deflate_compress, .coa_decompress = deflate_decompress } } }; static int __init deflate_mod_init(void) { return crypto_register_alg(&alg); } static void __exit deflate_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(deflate_mod_init); module_exit(deflate_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP"); MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
gpl-2.0
sycolon/kernel_3.10
fs/proc/consoles.c
7841
2249
/* * Copyright (c) 2010 Werner Fink, Jiri Slaby * * Licensed under GPLv2 */ #include <linux/console.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/tty_driver.h> /* * This is handler for /proc/consoles */ static int show_console_dev(struct seq_file *m, void *v) { static const struct { short flag; char name; } con_flags[] = { { CON_ENABLED, 'E' }, { CON_CONSDEV, 'C' }, { CON_BOOT, 'B' }, { CON_PRINTBUFFER, 'p' }, { CON_BRL, 'b' }, { CON_ANYTIME, 'a' }, }; char flags[ARRAY_SIZE(con_flags) + 1]; struct console *con = v; unsigned int a; int len; dev_t dev = 0; if (con->device) { const struct tty_driver *driver; int index; driver = con->device(con, &index); if (driver) { dev = MKDEV(driver->major, driver->minor_start); dev += index; } } for (a = 0; a < ARRAY_SIZE(con_flags); a++) flags[a] = (con->flags & con_flags[a].flag) ? con_flags[a].name : ' '; flags[a] = 0; seq_printf(m, "%s%d%n", con->name, con->index, &len); len = 21 - len; if (len < 1) len = 1; seq_printf(m, "%*c%c%c%c (%s)", len, ' ', con->read ? 'R' : '-', con->write ? 'W' : '-', con->unblank ? 
'U' : '-', flags); if (dev) seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev)); seq_printf(m, "\n"); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { struct console *con; loff_t off = 0; console_lock(); for_each_console(con) if (off++ == *pos) break; return con; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { struct console *con = v; ++*pos; return con->next; } static void c_stop(struct seq_file *m, void *v) { console_unlock(); } static const struct seq_operations consoles_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_console_dev }; static int consoles_open(struct inode *inode, struct file *file) { return seq_open(file, &consoles_op); } static const struct file_operations proc_consoles_operations = { .open = consoles_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init proc_consoles_init(void) { proc_create("consoles", 0, NULL, &proc_consoles_operations); return 0; } module_init(proc_consoles_init);
gpl-2.0
izzaeroth/FunkNA-KERNEL
drivers/input/ff-memless.c
8097
14540
/* * Force feedback support for memoryless devices * * Copyright (c) 2006 Anssi Hannula <anssi.hannula@gmail.com> * Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define DEBUG */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/input.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/jiffies.h> #include "fixp-arith.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Anssi Hannula <anssi.hannula@gmail.com>"); MODULE_DESCRIPTION("Force feedback support for memoryless devices"); /* Number of effects handled with memoryless devices */ #define FF_MEMLESS_EFFECTS 16 /* Envelope update interval in ms */ #define FF_ENVELOPE_INTERVAL 50 #define FF_EFFECT_STARTED 0 #define FF_EFFECT_PLAYING 1 #define FF_EFFECT_ABORTING 2 struct ml_effect_state { struct ff_effect *effect; unsigned long flags; /* effect state (STARTED, PLAYING, etc) */ int count; /* loop count of the effect */ unsigned long play_at; /* start time */ unsigned long stop_at; /* stop time */ unsigned long adj_at; /* last time the effect was sent */ }; struct ml_device { void *private; struct ml_effect_state states[FF_MEMLESS_EFFECTS]; int gain; struct timer_list timer; struct input_dev *dev; int (*play_effect)(struct input_dev *dev, void *data, 
struct ff_effect *effect); }; static const struct ff_envelope *get_envelope(const struct ff_effect *effect) { static const struct ff_envelope empty_envelope; switch (effect->type) { case FF_PERIODIC: return &effect->u.periodic.envelope; case FF_CONSTANT: return &effect->u.constant.envelope; default: return &empty_envelope; } } /* * Check for the next time envelope requires an update on memoryless devices */ static unsigned long calculate_next_time(struct ml_effect_state *state) { const struct ff_envelope *envelope = get_envelope(state->effect); unsigned long attack_stop, fade_start, next_fade; if (envelope->attack_length) { attack_stop = state->play_at + msecs_to_jiffies(envelope->attack_length); if (time_before(state->adj_at, attack_stop)) return state->adj_at + msecs_to_jiffies(FF_ENVELOPE_INTERVAL); } if (state->effect->replay.length) { if (envelope->fade_length) { /* check when fading should start */ fade_start = state->stop_at - msecs_to_jiffies(envelope->fade_length); if (time_before(state->adj_at, fade_start)) return fade_start; /* already fading, advance to next checkpoint */ next_fade = state->adj_at + msecs_to_jiffies(FF_ENVELOPE_INTERVAL); if (time_before(next_fade, state->stop_at)) return next_fade; } return state->stop_at; } return state->play_at; } static void ml_schedule_timer(struct ml_device *ml) { struct ml_effect_state *state; unsigned long now = jiffies; unsigned long earliest = 0; unsigned long next_at; int events = 0; int i; pr_debug("calculating next timer\n"); for (i = 0; i < FF_MEMLESS_EFFECTS; i++) { state = &ml->states[i]; if (!test_bit(FF_EFFECT_STARTED, &state->flags)) continue; if (test_bit(FF_EFFECT_PLAYING, &state->flags)) next_at = calculate_next_time(state); else next_at = state->play_at; if (time_before_eq(now, next_at) && (++events == 1 || time_before(next_at, earliest))) earliest = next_at; } if (!events) { pr_debug("no actions\n"); del_timer(&ml->timer); } else { pr_debug("timer set\n"); mod_timer(&ml->timer, earliest); } } /* 
* Apply an envelope to a value */ static int apply_envelope(struct ml_effect_state *state, int value, struct ff_envelope *envelope) { struct ff_effect *effect = state->effect; unsigned long now = jiffies; int time_from_level; int time_of_envelope; int envelope_level; int difference; if (envelope->attack_length && time_before(now, state->play_at + msecs_to_jiffies(envelope->attack_length))) { pr_debug("value = 0x%x, attack_level = 0x%x\n", value, envelope->attack_level); time_from_level = jiffies_to_msecs(now - state->play_at); time_of_envelope = envelope->attack_length; envelope_level = min_t(__s16, envelope->attack_level, 0x7fff); } else if (envelope->fade_length && effect->replay.length && time_after(now, state->stop_at - msecs_to_jiffies(envelope->fade_length)) && time_before(now, state->stop_at)) { time_from_level = jiffies_to_msecs(state->stop_at - now); time_of_envelope = envelope->fade_length; envelope_level = min_t(__s16, envelope->fade_level, 0x7fff); } else return value; difference = abs(value) - envelope_level; pr_debug("difference = %d\n", difference); pr_debug("time_from_level = 0x%x\n", time_from_level); pr_debug("time_of_envelope = 0x%x\n", time_of_envelope); difference = difference * time_from_level / time_of_envelope; pr_debug("difference = %d\n", difference); return value < 0 ? -(difference + envelope_level) : (difference + envelope_level); } /* * Return the type the effect has to be converted into (memless devices) */ static int get_compatible_type(struct ff_device *ff, int effect_type) { if (test_bit(effect_type, ff->ffbit)) return effect_type; if (effect_type == FF_PERIODIC && test_bit(FF_RUMBLE, ff->ffbit)) return FF_RUMBLE; pr_err("invalid type in get_compatible_type()\n"); return 0; } /* * Only left/right direction should be used (under/over 0x8000) for * forward/reverse motor direction (to keep calculation fast & simple). 
*/ static u16 ml_calculate_direction(u16 direction, u16 force, u16 new_direction, u16 new_force) { if (!force) return new_direction; if (!new_force) return direction; return (((u32)(direction >> 1) * force + (new_direction >> 1) * new_force) / (force + new_force)) << 1; } /* * Combine two effects and apply gain. */ static void ml_combine_effects(struct ff_effect *effect, struct ml_effect_state *state, int gain) { struct ff_effect *new = state->effect; unsigned int strong, weak, i; int x, y; fixp_t level; switch (new->type) { case FF_CONSTANT: i = new->direction * 360 / 0xffff; level = fixp_new16(apply_envelope(state, new->u.constant.level, &new->u.constant.envelope)); x = fixp_mult(fixp_sin(i), level) * gain / 0xffff; y = fixp_mult(-fixp_cos(i), level) * gain / 0xffff; /* * here we abuse ff_ramp to hold x and y of constant force * If in future any driver wants something else than x and y * in s8, this should be changed to something more generic */ effect->u.ramp.start_level = clamp_val(effect->u.ramp.start_level + x, -0x80, 0x7f); effect->u.ramp.end_level = clamp_val(effect->u.ramp.end_level + y, -0x80, 0x7f); break; case FF_RUMBLE: strong = (u32)new->u.rumble.strong_magnitude * gain / 0xffff; weak = (u32)new->u.rumble.weak_magnitude * gain / 0xffff; if (effect->u.rumble.strong_magnitude + strong) effect->direction = ml_calculate_direction( effect->direction, effect->u.rumble.strong_magnitude, new->direction, strong); else if (effect->u.rumble.weak_magnitude + weak) effect->direction = ml_calculate_direction( effect->direction, effect->u.rumble.weak_magnitude, new->direction, weak); else effect->direction = 0; effect->u.rumble.strong_magnitude = min(strong + effect->u.rumble.strong_magnitude, 0xffffU); effect->u.rumble.weak_magnitude = min(weak + effect->u.rumble.weak_magnitude, 0xffffU); break; case FF_PERIODIC: i = apply_envelope(state, abs(new->u.periodic.magnitude), &new->u.periodic.envelope); /* here we also scale it 0x7fff => 0xffff */ i = i * gain / 0x7fff; 
if (effect->u.rumble.strong_magnitude + i) effect->direction = ml_calculate_direction( effect->direction, effect->u.rumble.strong_magnitude, new->direction, i); else effect->direction = 0; effect->u.rumble.strong_magnitude = min(i + effect->u.rumble.strong_magnitude, 0xffffU); effect->u.rumble.weak_magnitude = min(i + effect->u.rumble.weak_magnitude, 0xffffU); break; default: pr_err("invalid type in ml_combine_effects()\n"); break; } } /* * Because memoryless devices have only one effect per effect type active * at one time we have to combine multiple effects into one */ static int ml_get_combo_effect(struct ml_device *ml, unsigned long *effect_handled, struct ff_effect *combo_effect) { struct ff_effect *effect; struct ml_effect_state *state; int effect_type; int i; memset(combo_effect, 0, sizeof(struct ff_effect)); for (i = 0; i < FF_MEMLESS_EFFECTS; i++) { if (__test_and_set_bit(i, effect_handled)) continue; state = &ml->states[i]; effect = state->effect; if (!test_bit(FF_EFFECT_STARTED, &state->flags)) continue; if (time_before(jiffies, state->play_at)) continue; /* * here we have started effects that are either * currently playing (and may need be aborted) * or need to start playing. 
*/ effect_type = get_compatible_type(ml->dev->ff, effect->type); if (combo_effect->type != effect_type) { if (combo_effect->type != 0) { __clear_bit(i, effect_handled); continue; } combo_effect->type = effect_type; } if (__test_and_clear_bit(FF_EFFECT_ABORTING, &state->flags)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); __clear_bit(FF_EFFECT_STARTED, &state->flags); } else if (effect->replay.length && time_after_eq(jiffies, state->stop_at)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); if (--state->count <= 0) { __clear_bit(FF_EFFECT_STARTED, &state->flags); } else { state->play_at = jiffies + msecs_to_jiffies(effect->replay.delay); state->stop_at = state->play_at + msecs_to_jiffies(effect->replay.length); } } else { __set_bit(FF_EFFECT_PLAYING, &state->flags); state->adj_at = jiffies; ml_combine_effects(combo_effect, state, ml->gain); } } return combo_effect->type != 0; } static void ml_play_effects(struct ml_device *ml) { struct ff_effect effect; DECLARE_BITMAP(handled_bm, FF_MEMLESS_EFFECTS); memset(handled_bm, 0, sizeof(handled_bm)); while (ml_get_combo_effect(ml, handled_bm, &effect)) ml->play_effect(ml->dev, ml->private, &effect); ml_schedule_timer(ml); } static void ml_effect_timer(unsigned long timer_data) { struct input_dev *dev = (struct input_dev *)timer_data; struct ml_device *ml = dev->ff->private; unsigned long flags; pr_debug("timer: updating effects\n"); spin_lock_irqsave(&dev->event_lock, flags); ml_play_effects(ml); spin_unlock_irqrestore(&dev->event_lock, flags); } /* * Sets requested gain for FF effects. Called with dev->event_lock held. */ static void ml_ff_set_gain(struct input_dev *dev, u16 gain) { struct ml_device *ml = dev->ff->private; int i; ml->gain = gain; for (i = 0; i < FF_MEMLESS_EFFECTS; i++) __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags); ml_play_effects(ml); } /* * Start/stop specified FF effect. Called with dev->event_lock held. 
*/ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value) { struct ml_device *ml = dev->ff->private; struct ml_effect_state *state = &ml->states[effect_id]; if (value > 0) { pr_debug("initiated play\n"); __set_bit(FF_EFFECT_STARTED, &state->flags); state->count = value; state->play_at = jiffies + msecs_to_jiffies(state->effect->replay.delay); state->stop_at = state->play_at + msecs_to_jiffies(state->effect->replay.length); state->adj_at = state->play_at; } else { pr_debug("initiated stop\n"); if (test_bit(FF_EFFECT_PLAYING, &state->flags)) __set_bit(FF_EFFECT_ABORTING, &state->flags); else __clear_bit(FF_EFFECT_STARTED, &state->flags); } ml_play_effects(ml); return 0; } static int ml_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old) { struct ml_device *ml = dev->ff->private; struct ml_effect_state *state = &ml->states[effect->id]; spin_lock_irq(&dev->event_lock); if (test_bit(FF_EFFECT_STARTED, &state->flags)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); state->play_at = jiffies + msecs_to_jiffies(state->effect->replay.delay); state->stop_at = state->play_at + msecs_to_jiffies(state->effect->replay.length); state->adj_at = state->play_at; ml_schedule_timer(ml); } spin_unlock_irq(&dev->event_lock); return 0; } static void ml_ff_destroy(struct ff_device *ff) { struct ml_device *ml = ff->private; kfree(ml->private); } /** * input_ff_create_memless() - create memoryless force-feedback device * @dev: input device supporting force-feedback * @data: driver-specific data to be passed into @play_effect * @play_effect: driver-specific method for playing FF effect */ int input_ff_create_memless(struct input_dev *dev, void *data, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)) { struct ml_device *ml; struct ff_device *ff; int error; int i; ml = kzalloc(sizeof(struct ml_device), GFP_KERNEL); if (!ml) return -ENOMEM; ml->dev = dev; ml->private = data; ml->play_effect = play_effect; ml->gain = 0xffff; 
setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev); set_bit(FF_GAIN, dev->ffbit); error = input_ff_create(dev, FF_MEMLESS_EFFECTS); if (error) { kfree(ml); return error; } ff = dev->ff; ff->private = ml; ff->upload = ml_ff_upload; ff->playback = ml_ff_playback; ff->set_gain = ml_ff_set_gain; ff->destroy = ml_ff_destroy; /* we can emulate periodic effects with RUMBLE */ if (test_bit(FF_RUMBLE, ff->ffbit)) { set_bit(FF_PERIODIC, dev->ffbit); set_bit(FF_SINE, dev->ffbit); set_bit(FF_TRIANGLE, dev->ffbit); set_bit(FF_SQUARE, dev->ffbit); } for (i = 0; i < FF_MEMLESS_EFFECTS; i++) ml->states[i].effect = &ff->effects[i]; return 0; } EXPORT_SYMBOL_GPL(input_ff_create_memless);
gpl-2.0
jacobrivers123/kernel-nk1-negalite-lt02ltespr
drivers/input/ff-memless.c
8097
14540
/* * Force feedback support for memoryless devices * * Copyright (c) 2006 Anssi Hannula <anssi.hannula@gmail.com> * Copyright (c) 2006 Dmitry Torokhov <dtor@mail.ru> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define DEBUG */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/input.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/jiffies.h> #include "fixp-arith.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Anssi Hannula <anssi.hannula@gmail.com>"); MODULE_DESCRIPTION("Force feedback support for memoryless devices"); /* Number of effects handled with memoryless devices */ #define FF_MEMLESS_EFFECTS 16 /* Envelope update interval in ms */ #define FF_ENVELOPE_INTERVAL 50 #define FF_EFFECT_STARTED 0 #define FF_EFFECT_PLAYING 1 #define FF_EFFECT_ABORTING 2 struct ml_effect_state { struct ff_effect *effect; unsigned long flags; /* effect state (STARTED, PLAYING, etc) */ int count; /* loop count of the effect */ unsigned long play_at; /* start time */ unsigned long stop_at; /* stop time */ unsigned long adj_at; /* last time the effect was sent */ }; struct ml_device { void *private; struct ml_effect_state states[FF_MEMLESS_EFFECTS]; int gain; struct timer_list timer; struct input_dev *dev; int (*play_effect)(struct input_dev *dev, void *data, 
struct ff_effect *effect); }; static const struct ff_envelope *get_envelope(const struct ff_effect *effect) { static const struct ff_envelope empty_envelope; switch (effect->type) { case FF_PERIODIC: return &effect->u.periodic.envelope; case FF_CONSTANT: return &effect->u.constant.envelope; default: return &empty_envelope; } } /* * Check for the next time envelope requires an update on memoryless devices */ static unsigned long calculate_next_time(struct ml_effect_state *state) { const struct ff_envelope *envelope = get_envelope(state->effect); unsigned long attack_stop, fade_start, next_fade; if (envelope->attack_length) { attack_stop = state->play_at + msecs_to_jiffies(envelope->attack_length); if (time_before(state->adj_at, attack_stop)) return state->adj_at + msecs_to_jiffies(FF_ENVELOPE_INTERVAL); } if (state->effect->replay.length) { if (envelope->fade_length) { /* check when fading should start */ fade_start = state->stop_at - msecs_to_jiffies(envelope->fade_length); if (time_before(state->adj_at, fade_start)) return fade_start; /* already fading, advance to next checkpoint */ next_fade = state->adj_at + msecs_to_jiffies(FF_ENVELOPE_INTERVAL); if (time_before(next_fade, state->stop_at)) return next_fade; } return state->stop_at; } return state->play_at; } static void ml_schedule_timer(struct ml_device *ml) { struct ml_effect_state *state; unsigned long now = jiffies; unsigned long earliest = 0; unsigned long next_at; int events = 0; int i; pr_debug("calculating next timer\n"); for (i = 0; i < FF_MEMLESS_EFFECTS; i++) { state = &ml->states[i]; if (!test_bit(FF_EFFECT_STARTED, &state->flags)) continue; if (test_bit(FF_EFFECT_PLAYING, &state->flags)) next_at = calculate_next_time(state); else next_at = state->play_at; if (time_before_eq(now, next_at) && (++events == 1 || time_before(next_at, earliest))) earliest = next_at; } if (!events) { pr_debug("no actions\n"); del_timer(&ml->timer); } else { pr_debug("timer set\n"); mod_timer(&ml->timer, earliest); } } /* 
* Apply an envelope to a value */ static int apply_envelope(struct ml_effect_state *state, int value, struct ff_envelope *envelope) { struct ff_effect *effect = state->effect; unsigned long now = jiffies; int time_from_level; int time_of_envelope; int envelope_level; int difference; if (envelope->attack_length && time_before(now, state->play_at + msecs_to_jiffies(envelope->attack_length))) { pr_debug("value = 0x%x, attack_level = 0x%x\n", value, envelope->attack_level); time_from_level = jiffies_to_msecs(now - state->play_at); time_of_envelope = envelope->attack_length; envelope_level = min_t(__s16, envelope->attack_level, 0x7fff); } else if (envelope->fade_length && effect->replay.length && time_after(now, state->stop_at - msecs_to_jiffies(envelope->fade_length)) && time_before(now, state->stop_at)) { time_from_level = jiffies_to_msecs(state->stop_at - now); time_of_envelope = envelope->fade_length; envelope_level = min_t(__s16, envelope->fade_level, 0x7fff); } else return value; difference = abs(value) - envelope_level; pr_debug("difference = %d\n", difference); pr_debug("time_from_level = 0x%x\n", time_from_level); pr_debug("time_of_envelope = 0x%x\n", time_of_envelope); difference = difference * time_from_level / time_of_envelope; pr_debug("difference = %d\n", difference); return value < 0 ? -(difference + envelope_level) : (difference + envelope_level); } /* * Return the type the effect has to be converted into (memless devices) */ static int get_compatible_type(struct ff_device *ff, int effect_type) { if (test_bit(effect_type, ff->ffbit)) return effect_type; if (effect_type == FF_PERIODIC && test_bit(FF_RUMBLE, ff->ffbit)) return FF_RUMBLE; pr_err("invalid type in get_compatible_type()\n"); return 0; } /* * Only left/right direction should be used (under/over 0x8000) for * forward/reverse motor direction (to keep calculation fast & simple). 
*/ static u16 ml_calculate_direction(u16 direction, u16 force, u16 new_direction, u16 new_force) { if (!force) return new_direction; if (!new_force) return direction; return (((u32)(direction >> 1) * force + (new_direction >> 1) * new_force) / (force + new_force)) << 1; } /* * Combine two effects and apply gain. */ static void ml_combine_effects(struct ff_effect *effect, struct ml_effect_state *state, int gain) { struct ff_effect *new = state->effect; unsigned int strong, weak, i; int x, y; fixp_t level; switch (new->type) { case FF_CONSTANT: i = new->direction * 360 / 0xffff; level = fixp_new16(apply_envelope(state, new->u.constant.level, &new->u.constant.envelope)); x = fixp_mult(fixp_sin(i), level) * gain / 0xffff; y = fixp_mult(-fixp_cos(i), level) * gain / 0xffff; /* * here we abuse ff_ramp to hold x and y of constant force * If in future any driver wants something else than x and y * in s8, this should be changed to something more generic */ effect->u.ramp.start_level = clamp_val(effect->u.ramp.start_level + x, -0x80, 0x7f); effect->u.ramp.end_level = clamp_val(effect->u.ramp.end_level + y, -0x80, 0x7f); break; case FF_RUMBLE: strong = (u32)new->u.rumble.strong_magnitude * gain / 0xffff; weak = (u32)new->u.rumble.weak_magnitude * gain / 0xffff; if (effect->u.rumble.strong_magnitude + strong) effect->direction = ml_calculate_direction( effect->direction, effect->u.rumble.strong_magnitude, new->direction, strong); else if (effect->u.rumble.weak_magnitude + weak) effect->direction = ml_calculate_direction( effect->direction, effect->u.rumble.weak_magnitude, new->direction, weak); else effect->direction = 0; effect->u.rumble.strong_magnitude = min(strong + effect->u.rumble.strong_magnitude, 0xffffU); effect->u.rumble.weak_magnitude = min(weak + effect->u.rumble.weak_magnitude, 0xffffU); break; case FF_PERIODIC: i = apply_envelope(state, abs(new->u.periodic.magnitude), &new->u.periodic.envelope); /* here we also scale it 0x7fff => 0xffff */ i = i * gain / 0x7fff; 
if (effect->u.rumble.strong_magnitude + i) effect->direction = ml_calculate_direction( effect->direction, effect->u.rumble.strong_magnitude, new->direction, i); else effect->direction = 0; effect->u.rumble.strong_magnitude = min(i + effect->u.rumble.strong_magnitude, 0xffffU); effect->u.rumble.weak_magnitude = min(i + effect->u.rumble.weak_magnitude, 0xffffU); break; default: pr_err("invalid type in ml_combine_effects()\n"); break; } } /* * Because memoryless devices have only one effect per effect type active * at one time we have to combine multiple effects into one */ static int ml_get_combo_effect(struct ml_device *ml, unsigned long *effect_handled, struct ff_effect *combo_effect) { struct ff_effect *effect; struct ml_effect_state *state; int effect_type; int i; memset(combo_effect, 0, sizeof(struct ff_effect)); for (i = 0; i < FF_MEMLESS_EFFECTS; i++) { if (__test_and_set_bit(i, effect_handled)) continue; state = &ml->states[i]; effect = state->effect; if (!test_bit(FF_EFFECT_STARTED, &state->flags)) continue; if (time_before(jiffies, state->play_at)) continue; /* * here we have started effects that are either * currently playing (and may need be aborted) * or need to start playing. 
*/ effect_type = get_compatible_type(ml->dev->ff, effect->type); if (combo_effect->type != effect_type) { if (combo_effect->type != 0) { __clear_bit(i, effect_handled); continue; } combo_effect->type = effect_type; } if (__test_and_clear_bit(FF_EFFECT_ABORTING, &state->flags)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); __clear_bit(FF_EFFECT_STARTED, &state->flags); } else if (effect->replay.length && time_after_eq(jiffies, state->stop_at)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); if (--state->count <= 0) { __clear_bit(FF_EFFECT_STARTED, &state->flags); } else { state->play_at = jiffies + msecs_to_jiffies(effect->replay.delay); state->stop_at = state->play_at + msecs_to_jiffies(effect->replay.length); } } else { __set_bit(FF_EFFECT_PLAYING, &state->flags); state->adj_at = jiffies; ml_combine_effects(combo_effect, state, ml->gain); } } return combo_effect->type != 0; } static void ml_play_effects(struct ml_device *ml) { struct ff_effect effect; DECLARE_BITMAP(handled_bm, FF_MEMLESS_EFFECTS); memset(handled_bm, 0, sizeof(handled_bm)); while (ml_get_combo_effect(ml, handled_bm, &effect)) ml->play_effect(ml->dev, ml->private, &effect); ml_schedule_timer(ml); } static void ml_effect_timer(unsigned long timer_data) { struct input_dev *dev = (struct input_dev *)timer_data; struct ml_device *ml = dev->ff->private; unsigned long flags; pr_debug("timer: updating effects\n"); spin_lock_irqsave(&dev->event_lock, flags); ml_play_effects(ml); spin_unlock_irqrestore(&dev->event_lock, flags); } /* * Sets requested gain for FF effects. Called with dev->event_lock held. */ static void ml_ff_set_gain(struct input_dev *dev, u16 gain) { struct ml_device *ml = dev->ff->private; int i; ml->gain = gain; for (i = 0; i < FF_MEMLESS_EFFECTS; i++) __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags); ml_play_effects(ml); } /* * Start/stop specified FF effect. Called with dev->event_lock held. 
*/ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value) { struct ml_device *ml = dev->ff->private; struct ml_effect_state *state = &ml->states[effect_id]; if (value > 0) { pr_debug("initiated play\n"); __set_bit(FF_EFFECT_STARTED, &state->flags); state->count = value; state->play_at = jiffies + msecs_to_jiffies(state->effect->replay.delay); state->stop_at = state->play_at + msecs_to_jiffies(state->effect->replay.length); state->adj_at = state->play_at; } else { pr_debug("initiated stop\n"); if (test_bit(FF_EFFECT_PLAYING, &state->flags)) __set_bit(FF_EFFECT_ABORTING, &state->flags); else __clear_bit(FF_EFFECT_STARTED, &state->flags); } ml_play_effects(ml); return 0; } static int ml_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old) { struct ml_device *ml = dev->ff->private; struct ml_effect_state *state = &ml->states[effect->id]; spin_lock_irq(&dev->event_lock); if (test_bit(FF_EFFECT_STARTED, &state->flags)) { __clear_bit(FF_EFFECT_PLAYING, &state->flags); state->play_at = jiffies + msecs_to_jiffies(state->effect->replay.delay); state->stop_at = state->play_at + msecs_to_jiffies(state->effect->replay.length); state->adj_at = state->play_at; ml_schedule_timer(ml); } spin_unlock_irq(&dev->event_lock); return 0; } static void ml_ff_destroy(struct ff_device *ff) { struct ml_device *ml = ff->private; kfree(ml->private); } /** * input_ff_create_memless() - create memoryless force-feedback device * @dev: input device supporting force-feedback * @data: driver-specific data to be passed into @play_effect * @play_effect: driver-specific method for playing FF effect */ int input_ff_create_memless(struct input_dev *dev, void *data, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)) { struct ml_device *ml; struct ff_device *ff; int error; int i; ml = kzalloc(sizeof(struct ml_device), GFP_KERNEL); if (!ml) return -ENOMEM; ml->dev = dev; ml->private = data; ml->play_effect = play_effect; ml->gain = 0xffff; 
setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev); set_bit(FF_GAIN, dev->ffbit); error = input_ff_create(dev, FF_MEMLESS_EFFECTS); if (error) { kfree(ml); return error; } ff = dev->ff; ff->private = ml; ff->upload = ml_ff_upload; ff->playback = ml_ff_playback; ff->set_gain = ml_ff_set_gain; ff->destroy = ml_ff_destroy; /* we can emulate periodic effects with RUMBLE */ if (test_bit(FF_RUMBLE, ff->ffbit)) { set_bit(FF_PERIODIC, dev->ffbit); set_bit(FF_SINE, dev->ffbit); set_bit(FF_TRIANGLE, dev->ffbit); set_bit(FF_SQUARE, dev->ffbit); } for (i = 0; i < FF_MEMLESS_EFFECTS; i++) ml->states[i].effect = &ff->effects[i]; return 0; } EXPORT_SYMBOL_GPL(input_ff_create_memless);
gpl-2.0
Zzomborg/Learning-2
drivers/gpu/drm/nouveau/nv50_mc.c
9377
1404
/* * Copyright (C) 2007 Ben Skeggs. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" int nv50_mc_init(struct drm_device *dev) { nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); return 0; } void nv50_mc_takedown(struct drm_device *dev) { }
gpl-2.0
georgecherian/linux
drivers/s390/net/smsgiucv_app.c
9633
5503
/* * Deliver z/VM CP special messages (SMSG) as uevents. * * The driver registers for z/VM CP special messages with the * "APP" prefix. Incoming messages are delivered to user space * as uevents. * * Copyright IBM Corp. 2010 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> * */ #define KMSG_COMPONENT "smsgiucv_app" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/ctype.h> #include <linux/err.h> #include <linux/device.h> #include <linux/list.h> #include <linux/kobject.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <net/iucv/iucv.h> #include "smsgiucv.h" /* prefix used for SMSG registration */ #define SMSG_PREFIX "APP" /* SMSG related uevent environment variables */ #define ENV_SENDER_STR "SMSG_SENDER=" #define ENV_SENDER_LEN (strlen(ENV_SENDER_STR) + 8 + 1) #define ENV_PREFIX_STR "SMSG_ID=" #define ENV_PREFIX_LEN (strlen(ENV_PREFIX_STR) + \ strlen(SMSG_PREFIX) + 1) #define ENV_TEXT_STR "SMSG_TEXT=" #define ENV_TEXT_LEN(msg) (strlen(ENV_TEXT_STR) + strlen((msg)) + 1) /* z/VM user ID which is permitted to send SMSGs * If the value is undefined or empty (""), special messages are * accepted from any z/VM user ID. 
*/ static char *sender; module_param(sender, charp, 0400); MODULE_PARM_DESC(sender, "z/VM user ID from which CP SMSGs are accepted"); /* SMSG device representation */ static struct device *smsg_app_dev; /* list element for queuing received messages for delivery */ struct smsg_app_event { struct list_head list; char *buf; char *envp[4]; }; /* queue for outgoing uevents */ static LIST_HEAD(smsg_event_queue); static DEFINE_SPINLOCK(smsg_event_queue_lock); static void smsg_app_event_free(struct smsg_app_event *ev) { kfree(ev->buf); kfree(ev); } static struct smsg_app_event *smsg_app_event_alloc(const char *from, const char *msg) { struct smsg_app_event *ev; ev = kzalloc(sizeof(*ev), GFP_ATOMIC); if (!ev) return NULL; ev->buf = kzalloc(ENV_SENDER_LEN + ENV_PREFIX_LEN + ENV_TEXT_LEN(msg), GFP_ATOMIC); if (!ev->buf) { kfree(ev); return NULL; } /* setting up environment pointers into buf */ ev->envp[0] = ev->buf; ev->envp[1] = ev->envp[0] + ENV_SENDER_LEN; ev->envp[2] = ev->envp[1] + ENV_PREFIX_LEN; ev->envp[3] = NULL; /* setting up environment: sender, prefix name, and message text */ snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from); snprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", SMSG_PREFIX); snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg); return ev; } static void smsg_event_work_fn(struct work_struct *work) { LIST_HEAD(event_queue); struct smsg_app_event *p, *n; struct device *dev; dev = get_device(smsg_app_dev); if (!dev) return; spin_lock_bh(&smsg_event_queue_lock); list_splice_init(&smsg_event_queue, &event_queue); spin_unlock_bh(&smsg_event_queue_lock); list_for_each_entry_safe(p, n, &event_queue, list) { list_del(&p->list); kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, p->envp); smsg_app_event_free(p); } put_device(dev); } static DECLARE_WORK(smsg_event_work, smsg_event_work_fn); static void smsg_app_callback(const char *from, char *msg) { struct smsg_app_event *se; /* check if the originating z/VM user ID matches * the 
configured sender. */ if (sender && strlen(sender) > 0 && strcmp(from, sender) != 0) return; /* get start of message text (skip prefix and leading blanks) */ msg += strlen(SMSG_PREFIX); while (*msg && isspace(*msg)) msg++; if (*msg == '\0') return; /* allocate event list element and its environment */ se = smsg_app_event_alloc(from, msg); if (!se) return; /* queue event and schedule work function */ spin_lock(&smsg_event_queue_lock); list_add_tail(&se->list, &smsg_event_queue); spin_unlock(&smsg_event_queue_lock); schedule_work(&smsg_event_work); return; } static int __init smsgiucv_app_init(void) { struct device_driver *smsgiucv_drv; int rc; if (!MACHINE_IS_VM) return -ENODEV; smsg_app_dev = kzalloc(sizeof(*smsg_app_dev), GFP_KERNEL); if (!smsg_app_dev) return -ENOMEM; smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus); if (!smsgiucv_drv) { kfree(smsg_app_dev); return -ENODEV; } rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT); if (rc) { kfree(smsg_app_dev); goto fail; } smsg_app_dev->bus = &iucv_bus; smsg_app_dev->parent = iucv_root; smsg_app_dev->release = (void (*)(struct device *)) kfree; smsg_app_dev->driver = smsgiucv_drv; rc = device_register(smsg_app_dev); if (rc) { put_device(smsg_app_dev); goto fail; } /* convert sender to uppercase characters */ if (sender) { int len = strlen(sender); while (len--) sender[len] = toupper(sender[len]); } /* register with the smsgiucv device driver */ rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback); if (rc) { device_unregister(smsg_app_dev); goto fail; } rc = 0; fail: return rc; } module_init(smsgiucv_app_init); static void __exit smsgiucv_app_exit(void) { /* unregister callback */ smsg_unregister_callback(SMSG_PREFIX, smsg_app_callback); /* cancel pending work and flush any queued event work */ cancel_work_sync(&smsg_event_work); smsg_event_work_fn(&smsg_event_work); device_unregister(smsg_app_dev); } module_exit(smsgiucv_app_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Deliver z/VM CP SMSG as 
uevents"); MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
gpl-2.0
mlachwani/Android_4.4.2_MotoG_Kernel
arch/alpha/kernel/core_titan.c
13473
20045
/* * linux/arch/alpha/kernel/core_titan.c * * Code common to all TITAN core logic chips. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_titan.h> #undef __EXTERN_INLINE #include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/bootmem.h> #include <asm/ptrace.h> #include <asm/smp.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/vga.h> #include "proto.h" #include "pci_impl.h" /* Save Titan configuration data as the console had it set up. */ struct { unsigned long wsba[4]; unsigned long wsm[4]; unsigned long tba[4]; } saved_config[4] __attribute__((common)); /* * Is PChip 1 present? No need to query it more than once. */ static int titan_pchip1_present; /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Routines to access TIG registers. */ static inline volatile unsigned long * mk_tig_addr(int offset) { return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6)); } static inline u8 titan_read_tig(int offset, u8 value) { volatile unsigned long *tig_addr = mk_tig_addr(offset); return (u8)(*tig_addr & 0xff); } static inline void titan_write_tig(int offset, u8 value) { volatile unsigned long *tig_addr = mk_tig_addr(offset); *tig_addr = (unsigned long)value; } /* * Given a bus, device, and function number, compute resulting * configuration space address * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Note that all config space accesses use Type 1 address format. * * Note also that type 1 is determined by non-zero bus number. 
* * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { struct pci_controller *hose = pbus->sysdata; unsigned long addr; u8 bus = pbus->number; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " "pci_addr=0x%p, type1=0x%p)\n", bus, device_fn, where, pci_addr, type1)); if (!pbus->parent) /* No parent means peer PCI bus. 
*/ bus = 0; *type1 = (bus != 0); addr = (bus << 16) | (device_fn << 8) | where; addr |= hose->config_space_base; *pci_addr = addr; DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } static int titan_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int titan_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops titan_pci_ops = { .read = titan_read_config, .write = titan_write_config, }; void titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { titan_pachip *pachip = (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0; titan_pachip_port *port; volatile unsigned long *csr; unsigned long value; /* Get the right hose. */ port = &pachip->g_port; if (hose->index & 2) port = &pachip->a_port; /* We can invalidate up to 8 tlb entries in a go. The flush matches against <31:16> in the pci address. Note that gtlbi* and atlbi* are in the same place in the g_port and a_port, respectively, so the g_port offset can be used even if hose is an a_port */ csr = &port->port_specific.g.gtlbia.csr; if (((start ^ end) & 0xffff0000) == 0) csr = &port->port_specific.g.gtlbiv.csr; /* For TBIA, it doesn't matter what value we write. 
For TBI, it's the shifted tag bits. */ value = (start & 0xffff0000) >> 12; wmb(); *csr = value; mb(); *csr; } static int titan_query_agp(titan_pachip_port *port) { union TPAchipPCTL pctl; /* set up APCTL */ pctl.pctl_q_whole = port->pctl.csr; return pctl.pctl_r_bits.apctl_v_agp_present; } static void __init titan_init_one_pachip_port(titan_pachip_port *port, int index) { struct pci_controller *hose; hose = alloc_pci_controller(); if (index == 0) pci_isa_hose = hose; hose->io_space = alloc_resource(); hose->mem_space = alloc_resource(); /* * This is for userland consumption. The 40-bit PIO bias that we * use in the kernel through KSEG doesn't work in the page table * based user mappings. (43-bit KSEG sign extends the physical * address from bit 40 to hit the I/O bit - mapped addresses don't). * So make sure we get the 43-bit PIO bias. */ hose->sparse_mem_base = 0; hose->sparse_io_base = 0; hose->dense_mem_base = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL; hose->dense_io_base = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL; hose->config_space_base = TITAN_CONF(index); hose->index = index; hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS; hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1; hose->io_space->name = pci_io_names[index]; hose->io_space->flags = IORESOURCE_IO; hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS; hose->mem_space->end = hose->mem_space->start + 0xffffffff; hose->mem_space->name = pci_mem_names[index]; hose->mem_space->flags = IORESOURCE_MEM; if (request_resource(&ioport_resource, hose->io_space) < 0) printk(KERN_ERR "Failed to request IO on hose %d\n", index); if (request_resource(&iomem_resource, hose->mem_space) < 0) printk(KERN_ERR "Failed to request MEM on hose %d\n", index); /* * Save the existing PCI window translations. SRM will * need them when we go to reboot. 
*/ saved_config[index].wsba[0] = port->wsba[0].csr; saved_config[index].wsm[0] = port->wsm[0].csr; saved_config[index].tba[0] = port->tba[0].csr; saved_config[index].wsba[1] = port->wsba[1].csr; saved_config[index].wsm[1] = port->wsm[1].csr; saved_config[index].tba[1] = port->tba[1].csr; saved_config[index].wsba[2] = port->wsba[2].csr; saved_config[index].wsm[2] = port->wsm[2].csr; saved_config[index].tba[2] = port->tba[2].csr; saved_config[index].wsba[3] = port->wsba[3].csr; saved_config[index].wsm[3] = port->wsm[3].csr; saved_config[index].tba[3] = port->tba[3].csr; /* * Set up the PCI to main memory translation windows. * * Note: Window 3 on Titan is Scatter-Gather ONLY. * * Window 0 is scatter-gather 8MB at 8MB (for isa) * Window 1 is direct access 1GB at 2GB * Window 2 is scatter-gather 1GB at 3GB */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); hose->sg_isa->align_entry = 8; /* 64KB for ISA */ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0); hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */ port->wsba[0].csr = hose->sg_isa->dma_base | 3; port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000; port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); port->wsba[1].csr = __direct_map_base | 1; port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000; port->tba[1].csr = 0; port->wsba[2].csr = hose->sg_pci->dma_base | 3; port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000; port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes); port->wsba[3].csr = 0; /* Enable the Monster Window to make DAC pci64 possible. */ port->pctl.csr |= pctl_m_mwin; /* * If it's an AGP port, initialize agplastwr. */ if (titan_query_agp(port)) port->port_specific.a.agplastwr.csr = __direct_map_base; titan_pci_tbi(hose, 0, -1); } static void __init titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1) { titan_pchip1_present = TITAN_cchip->csc.csr & 1L<<14; /* Init the ports in hose order... 
*/ titan_init_one_pachip_port(&pachip0->g_port, 0); /* hose 0 */ if (titan_pchip1_present) titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */ titan_init_one_pachip_port(&pachip0->a_port, 2); /* hose 2 */ if (titan_pchip1_present) titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */ } void __init titan_init_arch(void) { #if 0 printk("%s: titan_init_arch()\n", __func__); printk("%s: CChip registers:\n", __func__); printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr); printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr); printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr); printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr); printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr); printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr); printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr); printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr); printk("%s: DChip registers:\n", __func__); printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr); printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr); printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr); #endif boot_cpuid = __hard_smp_processor_id(); /* With multiple PCI busses, we play with I/O as physical addrs. */ ioport_resource.end = ~0UL; iomem_resource.end = ~0UL; /* PCI DMA Direct Mapping is 1GB at 2GB. */ __direct_map_base = 0x80000000; __direct_map_size = 0x40000000; /* Init the PA chip(s). */ titan_init_pachips(TITAN_pachip0, TITAN_pachip1); /* Check for graphic console location (if any). 
*/ find_console_vga_hose(); } static void titan_kill_one_pachip_port(titan_pachip_port *port, int index) { port->wsba[0].csr = saved_config[index].wsba[0]; port->wsm[0].csr = saved_config[index].wsm[0]; port->tba[0].csr = saved_config[index].tba[0]; port->wsba[1].csr = saved_config[index].wsba[1]; port->wsm[1].csr = saved_config[index].wsm[1]; port->tba[1].csr = saved_config[index].tba[1]; port->wsba[2].csr = saved_config[index].wsba[2]; port->wsm[2].csr = saved_config[index].wsm[2]; port->tba[2].csr = saved_config[index].tba[2]; port->wsba[3].csr = saved_config[index].wsba[3]; port->wsm[3].csr = saved_config[index].wsm[3]; port->tba[3].csr = saved_config[index].tba[3]; } static void titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1) { if (titan_pchip1_present) { titan_kill_one_pachip_port(&pachip1->g_port, 1); titan_kill_one_pachip_port(&pachip1->a_port, 3); } titan_kill_one_pachip_port(&pachip0->g_port, 0); titan_kill_one_pachip_port(&pachip0->a_port, 2); } void titan_kill_arch(int mode) { titan_kill_pachips(TITAN_pachip0, TITAN_pachip1); } /* * IO map support. */ void __iomem * titan_ioportmap(unsigned long addr) { FIXUP_IOADDR_VGA(addr); return (void __iomem *)(addr + TITAN_IO_BIAS); } void __iomem * titan_ioremap(unsigned long addr, unsigned long size) { int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT; unsigned long baddr = addr & ~TITAN_HOSE_MASK; unsigned long last = baddr + size - 1; struct pci_controller *hose; struct vm_struct *area; unsigned long vaddr; unsigned long *ptes; unsigned long pfn; /* * Adjust the address and hose, if necessary. */ if (pci_vga_hose && __is_mem_vga(addr)) { h = pci_vga_hose->index; addr += pci_vga_hose->mem_space->start; } /* * Find the hose. */ for (hose = hose_head; hose; hose = hose->next) if (hose->index == h) break; if (!hose) return NULL; /* * Is it direct-mapped? 
*/ if ((baddr >= __direct_map_base) && ((baddr + size - 1) < __direct_map_base + __direct_map_size)) { vaddr = addr - __direct_map_base + TITAN_MEM_BIAS; return (void __iomem *) vaddr; } /* * Check the scatter-gather arena. */ if (hose->sg_pci && baddr >= (unsigned long)hose->sg_pci->dma_base && last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){ /* * Adjust the limits (mappings must be page aligned) */ baddr -= hose->sg_pci->dma_base; last -= hose->sg_pci->dma_base; baddr &= PAGE_MASK; size = PAGE_ALIGN(last) - baddr; /* * Map it */ area = get_vm_area(size, VM_IOREMAP); if (!area) { printk("ioremap failed... no vm_area...\n"); return NULL; } ptes = hose->sg_pci->ptes; for (vaddr = (unsigned long)area->addr; baddr <= last; baddr += PAGE_SIZE, vaddr += PAGE_SIZE) { pfn = ptes[baddr >> PAGE_SHIFT]; if (!(pfn & 1)) { printk("ioremap failed... pte not valid...\n"); vfree(area->addr); return NULL; } pfn >>= 1; /* make it a true pfn */ if (__alpha_remap_area_pages(vaddr, pfn << PAGE_SHIFT, PAGE_SIZE, 0)) { printk("FAILED to remap_area_pages...\n"); vfree(area->addr); return NULL; } } flush_tlb_all(); vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK); return (void __iomem *) vaddr; } /* Assume a legacy (read: VGA) address, and return appropriately. */ return (void __iomem *)(addr + TITAN_MEM_BIAS); } void titan_iounmap(volatile void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr >= VMALLOC_START) vfree((void *)(PAGE_MASK & addr)); } int titan_is_mmio(const volatile void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr >= VMALLOC_START) return 1; else return (addr & 0x100000000UL) == 0; } #ifndef CONFIG_ALPHA_GENERIC EXPORT_SYMBOL(titan_ioportmap); EXPORT_SYMBOL(titan_ioremap); EXPORT_SYMBOL(titan_iounmap); EXPORT_SYMBOL(titan_is_mmio); #endif /* * AGP GART Support. 
*/ #include <linux/agp_backend.h> #include <asm/agp_backend.h> #include <linux/slab.h> #include <linux/delay.h> struct titan_agp_aperture { struct pci_iommu_arena *arena; long pg_start; long pg_count; }; static int titan_agp_setup(alpha_agp_info *agp) { struct titan_agp_aperture *aper; if (!alpha_agpgart_size) return -ENOMEM; aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL); if (aper == NULL) return -ENOMEM; aper->arena = agp->hose->sg_pci; aper->pg_count = alpha_agpgart_size / PAGE_SIZE; aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, aper->pg_count - 1); if (aper->pg_start < 0) { printk(KERN_ERR "Failed to reserve AGP memory\n"); kfree(aper); return -ENOMEM; } agp->aperture.bus_base = aper->arena->dma_base + aper->pg_start * PAGE_SIZE; agp->aperture.size = aper->pg_count * PAGE_SIZE; agp->aperture.sysdata = aper; return 0; } static void titan_agp_cleanup(alpha_agp_info *agp) { struct titan_agp_aperture *aper = agp->aperture.sysdata; int status; status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); if (status == -EBUSY) { printk(KERN_WARNING "Attempted to release bound AGP memory - unbinding\n"); iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); } if (status < 0) printk(KERN_ERR "Failed to release AGP memory\n"); kfree(aper); kfree(agp); } static int titan_agp_configure(alpha_agp_info *agp) { union TPAchipPCTL pctl; titan_pachip_port *port = agp->private; pctl.pctl_q_whole = port->pctl.csr; /* Side-Band Addressing? */ pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba; /* AGP Rate? */ pctl.pctl_r_bits.apctl_v_agp_rate = 0; /* 1x */ if (agp->mode.bits.rate & 2) pctl.pctl_r_bits.apctl_v_agp_rate = 1; /* 2x */ #if 0 if (agp->mode.bits.rate & 4) pctl.pctl_r_bits.apctl_v_agp_rate = 2; /* 4x */ #endif /* RQ Depth? */ pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2; pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7; /* * AGP Enable. 
*/ pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable; /* Tell the user. */ printk("Enabling AGP: %dX%s\n", 1 << pctl.pctl_r_bits.apctl_v_agp_rate, pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : ""); /* Write it. */ port->pctl.csr = pctl.pctl_q_whole; /* And wait at least 5000 66MHz cycles (per Titan spec). */ udelay(100); return 0; } static int titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) { struct titan_agp_aperture *aper = agp->aperture.sysdata; return iommu_bind(aper->arena, aper->pg_start + pg_start, mem->page_count, mem->pages); } static int titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) { struct titan_agp_aperture *aper = agp->aperture.sysdata; return iommu_unbind(aper->arena, aper->pg_start + pg_start, mem->page_count); } static unsigned long titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr) { struct titan_agp_aperture *aper = agp->aperture.sysdata; unsigned long baddr = addr - aper->arena->dma_base; unsigned long pte; if (addr < agp->aperture.bus_base || addr >= agp->aperture.bus_base + agp->aperture.size) { printk("%s: addr out of range\n", __func__); return -EINVAL; } pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; if (!(pte & 1)) { printk("%s: pte not valid\n", __func__); return -EINVAL; } return (pte >> 1) << PAGE_SHIFT; } struct alpha_agp_ops titan_agp_ops = { .setup = titan_agp_setup, .cleanup = titan_agp_cleanup, .configure = titan_agp_configure, .bind = titan_agp_bind_memory, .unbind = titan_agp_unbind_memory, .translate = titan_agp_translate }; alpha_agp_info * titan_agp_info(void) { alpha_agp_info *agp; struct pci_controller *hose; titan_pachip_port *port; int hosenum = -1; union TPAchipPCTL pctl; /* * Find the AGP port. */ port = &TITAN_pachip0->a_port; if (titan_query_agp(port)) hosenum = 2; if (hosenum < 0 && titan_pchip1_present && titan_query_agp(port = &TITAN_pachip1->a_port)) hosenum = 3; /* * Find the hose the port is on. 
*/ for (hose = hose_head; hose; hose = hose->next) if (hose->index == hosenum) break; if (!hose || !hose->sg_pci) return NULL; /* * Allocate the info structure. */ agp = kmalloc(sizeof(*agp), GFP_KERNEL); if (!agp) return NULL; /* * Fill it in. */ agp->hose = hose; agp->private = port; agp->ops = &titan_agp_ops; /* * Aperture - not configured until ops.setup(). * * FIXME - should we go ahead and allocate it here? */ agp->aperture.bus_base = 0; agp->aperture.size = 0; agp->aperture.sysdata = NULL; /* * Capabilities. */ agp->capability.lw = 0; agp->capability.bits.rate = 3; /* 2x, 1x */ agp->capability.bits.sba = 1; agp->capability.bits.rq = 7; /* 8 - 1 */ /* * Mode. */ pctl.pctl_q_whole = port->pctl.csr; agp->mode.lw = 0; agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate; agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en; agp->mode.bits.rq = 7; /* RQ Depth? */ agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en; return agp; }
gpl-2.0
kozmikkick/KozmiKKernel-HTC-One
arch/alpha/kernel/core_titan.c
13473
20045
/* * linux/arch/alpha/kernel/core_titan.c * * Code common to all TITAN core logic chips. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_titan.h> #undef __EXTERN_INLINE #include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/bootmem.h> #include <asm/ptrace.h> #include <asm/smp.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/vga.h> #include "proto.h" #include "pci_impl.h" /* Save Titan configuration data as the console had it set up. */ struct { unsigned long wsba[4]; unsigned long wsm[4]; unsigned long tba[4]; } saved_config[4] __attribute__((common)); /* * Is PChip 1 present? No need to query it more than once. */ static int titan_pchip1_present; /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Routines to access TIG registers. */ static inline volatile unsigned long * mk_tig_addr(int offset) { return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6)); } static inline u8 titan_read_tig(int offset, u8 value) { volatile unsigned long *tig_addr = mk_tig_addr(offset); return (u8)(*tig_addr & 0xff); } static inline void titan_write_tig(int offset, u8 value) { volatile unsigned long *tig_addr = mk_tig_addr(offset); *tig_addr = (unsigned long)value; } /* * Given a bus, device, and function number, compute resulting * configuration space address * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Note that all config space accesses use Type 1 address format. * * Note also that type 1 is determined by non-zero bus number. 
* * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { struct pci_controller *hose = pbus->sysdata; unsigned long addr; u8 bus = pbus->number; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " "pci_addr=0x%p, type1=0x%p)\n", bus, device_fn, where, pci_addr, type1)); if (!pbus->parent) /* No parent means peer PCI bus. 
*/ bus = 0; *type1 = (bus != 0); addr = (bus << 16) | (device_fn << 8) | where; addr |= hose->config_space_base; *pci_addr = addr; DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } static int titan_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int titan_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops titan_pci_ops = { .read = titan_read_config, .write = titan_write_config, }; void titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { titan_pachip *pachip = (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0; titan_pachip_port *port; volatile unsigned long *csr; unsigned long value; /* Get the right hose. */ port = &pachip->g_port; if (hose->index & 2) port = &pachip->a_port; /* We can invalidate up to 8 tlb entries in a go. The flush matches against <31:16> in the pci address. Note that gtlbi* and atlbi* are in the same place in the g_port and a_port, respectively, so the g_port offset can be used even if hose is an a_port */ csr = &port->port_specific.g.gtlbia.csr; if (((start ^ end) & 0xffff0000) == 0) csr = &port->port_specific.g.gtlbiv.csr; /* For TBIA, it doesn't matter what value we write. 
For TBI, it's the shifted tag bits. */ value = (start & 0xffff0000) >> 12; wmb(); *csr = value; mb(); *csr; } static int titan_query_agp(titan_pachip_port *port) { union TPAchipPCTL pctl; /* set up APCTL */ pctl.pctl_q_whole = port->pctl.csr; return pctl.pctl_r_bits.apctl_v_agp_present; } static void __init titan_init_one_pachip_port(titan_pachip_port *port, int index) { struct pci_controller *hose; hose = alloc_pci_controller(); if (index == 0) pci_isa_hose = hose; hose->io_space = alloc_resource(); hose->mem_space = alloc_resource(); /* * This is for userland consumption. The 40-bit PIO bias that we * use in the kernel through KSEG doesn't work in the page table * based user mappings. (43-bit KSEG sign extends the physical * address from bit 40 to hit the I/O bit - mapped addresses don't). * So make sure we get the 43-bit PIO bias. */ hose->sparse_mem_base = 0; hose->sparse_io_base = 0; hose->dense_mem_base = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL; hose->dense_io_base = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL; hose->config_space_base = TITAN_CONF(index); hose->index = index; hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS; hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1; hose->io_space->name = pci_io_names[index]; hose->io_space->flags = IORESOURCE_IO; hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS; hose->mem_space->end = hose->mem_space->start + 0xffffffff; hose->mem_space->name = pci_mem_names[index]; hose->mem_space->flags = IORESOURCE_MEM; if (request_resource(&ioport_resource, hose->io_space) < 0) printk(KERN_ERR "Failed to request IO on hose %d\n", index); if (request_resource(&iomem_resource, hose->mem_space) < 0) printk(KERN_ERR "Failed to request MEM on hose %d\n", index); /* * Save the existing PCI window translations. SRM will * need them when we go to reboot. 
*/ saved_config[index].wsba[0] = port->wsba[0].csr; saved_config[index].wsm[0] = port->wsm[0].csr; saved_config[index].tba[0] = port->tba[0].csr; saved_config[index].wsba[1] = port->wsba[1].csr; saved_config[index].wsm[1] = port->wsm[1].csr; saved_config[index].tba[1] = port->tba[1].csr; saved_config[index].wsba[2] = port->wsba[2].csr; saved_config[index].wsm[2] = port->wsm[2].csr; saved_config[index].tba[2] = port->tba[2].csr; saved_config[index].wsba[3] = port->wsba[3].csr; saved_config[index].wsm[3] = port->wsm[3].csr; saved_config[index].tba[3] = port->tba[3].csr; /* * Set up the PCI to main memory translation windows. * * Note: Window 3 on Titan is Scatter-Gather ONLY. * * Window 0 is scatter-gather 8MB at 8MB (for isa) * Window 1 is direct access 1GB at 2GB * Window 2 is scatter-gather 1GB at 3GB */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0); hose->sg_isa->align_entry = 8; /* 64KB for ISA */ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0); hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */ port->wsba[0].csr = hose->sg_isa->dma_base | 3; port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000; port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); port->wsba[1].csr = __direct_map_base | 1; port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000; port->tba[1].csr = 0; port->wsba[2].csr = hose->sg_pci->dma_base | 3; port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000; port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes); port->wsba[3].csr = 0; /* Enable the Monster Window to make DAC pci64 possible. */ port->pctl.csr |= pctl_m_mwin; /* * If it's an AGP port, initialize agplastwr. */ if (titan_query_agp(port)) port->port_specific.a.agplastwr.csr = __direct_map_base; titan_pci_tbi(hose, 0, -1); } static void __init titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1) { titan_pchip1_present = TITAN_cchip->csc.csr & 1L<<14; /* Init the ports in hose order... 
*/ titan_init_one_pachip_port(&pachip0->g_port, 0); /* hose 0 */ if (titan_pchip1_present) titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */ titan_init_one_pachip_port(&pachip0->a_port, 2); /* hose 2 */ if (titan_pchip1_present) titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */ } void __init titan_init_arch(void) { #if 0 printk("%s: titan_init_arch()\n", __func__); printk("%s: CChip registers:\n", __func__); printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr); printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr); printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr); printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr); printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr); printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr); printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr); printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr); printk("%s: DChip registers:\n", __func__); printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr); printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr); printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr); #endif boot_cpuid = __hard_smp_processor_id(); /* With multiple PCI busses, we play with I/O as physical addrs. */ ioport_resource.end = ~0UL; iomem_resource.end = ~0UL; /* PCI DMA Direct Mapping is 1GB at 2GB. */ __direct_map_base = 0x80000000; __direct_map_size = 0x40000000; /* Init the PA chip(s). */ titan_init_pachips(TITAN_pachip0, TITAN_pachip1); /* Check for graphic console location (if any). 
*/ find_console_vga_hose(); } static void titan_kill_one_pachip_port(titan_pachip_port *port, int index) { port->wsba[0].csr = saved_config[index].wsba[0]; port->wsm[0].csr = saved_config[index].wsm[0]; port->tba[0].csr = saved_config[index].tba[0]; port->wsba[1].csr = saved_config[index].wsba[1]; port->wsm[1].csr = saved_config[index].wsm[1]; port->tba[1].csr = saved_config[index].tba[1]; port->wsba[2].csr = saved_config[index].wsba[2]; port->wsm[2].csr = saved_config[index].wsm[2]; port->tba[2].csr = saved_config[index].tba[2]; port->wsba[3].csr = saved_config[index].wsba[3]; port->wsm[3].csr = saved_config[index].wsm[3]; port->tba[3].csr = saved_config[index].tba[3]; } static void titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1) { if (titan_pchip1_present) { titan_kill_one_pachip_port(&pachip1->g_port, 1); titan_kill_one_pachip_port(&pachip1->a_port, 3); } titan_kill_one_pachip_port(&pachip0->g_port, 0); titan_kill_one_pachip_port(&pachip0->a_port, 2); } void titan_kill_arch(int mode) { titan_kill_pachips(TITAN_pachip0, TITAN_pachip1); } /* * IO map support. */ void __iomem * titan_ioportmap(unsigned long addr) { FIXUP_IOADDR_VGA(addr); return (void __iomem *)(addr + TITAN_IO_BIAS); } void __iomem * titan_ioremap(unsigned long addr, unsigned long size) { int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT; unsigned long baddr = addr & ~TITAN_HOSE_MASK; unsigned long last = baddr + size - 1; struct pci_controller *hose; struct vm_struct *area; unsigned long vaddr; unsigned long *ptes; unsigned long pfn; /* * Adjust the address and hose, if necessary. */ if (pci_vga_hose && __is_mem_vga(addr)) { h = pci_vga_hose->index; addr += pci_vga_hose->mem_space->start; } /* * Find the hose. */ for (hose = hose_head; hose; hose = hose->next) if (hose->index == h) break; if (!hose) return NULL; /* * Is it direct-mapped? 
*/ if ((baddr >= __direct_map_base) && ((baddr + size - 1) < __direct_map_base + __direct_map_size)) { vaddr = addr - __direct_map_base + TITAN_MEM_BIAS; return (void __iomem *) vaddr; } /* * Check the scatter-gather arena. */ if (hose->sg_pci && baddr >= (unsigned long)hose->sg_pci->dma_base && last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){ /* * Adjust the limits (mappings must be page aligned) */ baddr -= hose->sg_pci->dma_base; last -= hose->sg_pci->dma_base; baddr &= PAGE_MASK; size = PAGE_ALIGN(last) - baddr; /* * Map it */ area = get_vm_area(size, VM_IOREMAP); if (!area) { printk("ioremap failed... no vm_area...\n"); return NULL; } ptes = hose->sg_pci->ptes; for (vaddr = (unsigned long)area->addr; baddr <= last; baddr += PAGE_SIZE, vaddr += PAGE_SIZE) { pfn = ptes[baddr >> PAGE_SHIFT]; if (!(pfn & 1)) { printk("ioremap failed... pte not valid...\n"); vfree(area->addr); return NULL; } pfn >>= 1; /* make it a true pfn */ if (__alpha_remap_area_pages(vaddr, pfn << PAGE_SHIFT, PAGE_SIZE, 0)) { printk("FAILED to remap_area_pages...\n"); vfree(area->addr); return NULL; } } flush_tlb_all(); vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK); return (void __iomem *) vaddr; } /* Assume a legacy (read: VGA) address, and return appropriately. */ return (void __iomem *)(addr + TITAN_MEM_BIAS); } void titan_iounmap(volatile void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr >= VMALLOC_START) vfree((void *)(PAGE_MASK & addr)); } int titan_is_mmio(const volatile void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr >= VMALLOC_START) return 1; else return (addr & 0x100000000UL) == 0; } #ifndef CONFIG_ALPHA_GENERIC EXPORT_SYMBOL(titan_ioportmap); EXPORT_SYMBOL(titan_ioremap); EXPORT_SYMBOL(titan_iounmap); EXPORT_SYMBOL(titan_is_mmio); #endif /* * AGP GART Support. 
 */
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

/* Bookkeeping for the AGP aperture carved out of the PCI SG arena. */
struct titan_agp_aperture {
	struct pci_iommu_arena *arena;
	long pg_start;
	long pg_count;
};

/*
 * Reserve alpha_agpgart_size bytes of the hose's scatter-gather arena
 * for the AGP aperture and record it in agp->aperture.
 * Returns 0 or -ENOMEM.
 */
static int
titan_agp_setup(alpha_agp_info *agp)
{
	struct titan_agp_aperture *aper;

	if (!alpha_agpgart_size)
		return -ENOMEM;

	aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
	if (aper == NULL)
		return -ENOMEM;

	aper->arena = agp->hose->sg_pci;
	aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
				       aper->pg_count - 1);
	if (aper->pg_start < 0) {
		printk(KERN_ERR "Failed to reserve AGP memory\n");
		kfree(aper);
		return -ENOMEM;
	}

	agp->aperture.bus_base = aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
	agp->aperture.size = aper->pg_count * PAGE_SIZE;
	agp->aperture.sysdata = aper;

	return 0;
}

/*
 * Release the aperture reservation.  If pages are still bound, force
 * an unbind first, then release again.  Frees both the aperture
 * bookkeeping and the alpha_agp_info itself.
 */
static void
titan_agp_cleanup(alpha_agp_info *agp)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}

/*
 * Program the port's PCTL CSR from the requested AGP mode
 * (SBA, rate, RQ depth, enable).
 */
static int
titan_agp_configure(alpha_agp_info *agp)
{
	union TPAchipPCTL pctl;
	titan_pachip_port *port = agp->private;
	pctl.pctl_q_whole = port->pctl.csr;

	/* Side-Band Addressing?  */
	pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;

	/* AGP Rate? */
	pctl.pctl_r_bits.apctl_v_agp_rate = 0;		/* 1x */
	if (agp->mode.bits.rate & 2)
		pctl.pctl_r_bits.apctl_v_agp_rate = 1;	/* 2x */
#if 0
	if (agp->mode.bits.rate & 4)
		pctl.pctl_r_bits.apctl_v_agp_rate = 2;	/* 4x */
#endif

	/* RQ Depth? */
	pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
	pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;

	/*
	 * AGP Enable.
	 */
	pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;

	/* Tell the user.  */
	printk("Enabling AGP: %dX%s\n",
	       1 << pctl.pctl_r_bits.apctl_v_agp_rate,
	       pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");

	/* Write it.  */
	port->pctl.csr = pctl.pctl_q_whole;

	/* And wait at least 5000 66MHz cycles (per Titan spec).  */
	udelay(100);

	return 0;
}

/* Bind pages into the aperture; pg_start is relative to the aperture. */
static int
titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_bind(aper->arena, aper->pg_start + pg_start,
			  mem->page_count, mem->pages);
}

/* Inverse of titan_agp_bind_memory. */
static int
titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
			    mem->page_count);
}

/*
 * Translate an aperture bus address to the physical address the
 * corresponding arena PTE points at.  Returns -EINVAL (as an
 * unsigned long) for out-of-range addresses or invalid PTEs.
 */
static unsigned long
titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
	struct titan_agp_aperture *aper = agp->aperture.sysdata;
	unsigned long baddr = addr - aper->arena->dma_base;
	unsigned long pte;

	if (addr < agp->aperture.bus_base ||
	    addr >= agp->aperture.bus_base + agp->aperture.size) {
		printk("%s: addr out of range\n", __func__);
		return -EINVAL;
	}

	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
	if (!(pte & 1)) {
		printk("%s: pte not valid\n", __func__);
		return -EINVAL;
	}

	return (pte >> 1) << PAGE_SHIFT;
}

struct alpha_agp_ops titan_agp_ops =
{
	.setup		= titan_agp_setup,
	.cleanup	= titan_agp_cleanup,
	.configure	= titan_agp_configure,
	.bind		= titan_agp_bind_memory,
	.unbind		= titan_agp_unbind_memory,
	.translate	= titan_agp_translate
};

/*
 * Probe for an AGP-capable Pachip port, find its hose, and build the
 * alpha_agp_info describing capabilities and current mode.  Returns
 * NULL if no AGP port (with an SG arena) is found or on allocation
 * failure.  The aperture itself is set up later by ops->setup().
 */
alpha_agp_info *
titan_agp_info(void)
{
	alpha_agp_info *agp;
	struct pci_controller *hose;
	titan_pachip_port *port;
	int hosenum = -1;
	union TPAchipPCTL pctl;

	/*
	 * Find the AGP port.
	 */
	port = &TITAN_pachip0->a_port;
	if (titan_query_agp(port))
		hosenum = 2;
	if (hosenum < 0 &&
	    titan_pchip1_present &&
	    titan_query_agp(port = &TITAN_pachip1->a_port))
		hosenum = 3;

	/*
	 * Find the hose the port is on.
	 */
	for (hose = hose_head; hose; hose = hose->next)
		if (hose->index == hosenum)
			break;

	if (!hose || !hose->sg_pci)
		return NULL;

	/*
	 * Allocate the info structure.
	 */
	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
	if (!agp)
		return NULL;

	/*
	 * Fill it in.
	 */
	agp->hose = hose;
	agp->private = port;
	agp->ops = &titan_agp_ops;

	/*
	 * Aperture - not configured until ops.setup().
	 *
	 * FIXME - should we go ahead and allocate it here?
	 */
	agp->aperture.bus_base = 0;
	agp->aperture.size = 0;
	agp->aperture.sysdata = NULL;

	/*
	 * Capabilities.
	 */
	agp->capability.lw = 0;
	agp->capability.bits.rate = 3;	/* 2x, 1x */
	agp->capability.bits.sba = 1;
	agp->capability.bits.rq = 7;	/* 8 - 1 */

	/*
	 * Mode.
	 */
	pctl.pctl_q_whole = port->pctl.csr;
	agp->mode.lw = 0;
	agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
	agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
	agp->mode.bits.rq = 7;	/* RQ Depth? */
	agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;

	return agp;
}
gpl-2.0
youfearm3/gcc
gcc/testsuite/gfortran.fortran-torture/execute/nan_inf_fmt.f90
162
2361
!pr 12839- F2003 formatting of Inf /Nan ! Modified for PR47434 implicit none character*40 l character*12 fmt real zero, pos_inf, neg_inf, nan zero = 0.0 ! need a better way of generating these floating point ! exceptional constants. pos_inf = 1.0/zero neg_inf = -1.0/zero nan = zero/zero ! check a field width = 0 fmt = '(F0.0)' write(l,fmt=fmt)pos_inf if (l.ne.'Inf') call abort write(l,fmt=fmt)neg_inf if (l.ne.'-Inf') call abort write(l,fmt=fmt)nan if (l.ne.'NaN') call abort ! check a field width < 3 fmt = '(F2.0)' write(l,fmt=fmt)pos_inf if (l.ne.'**') call abort write(l,fmt=fmt)neg_inf if (l.ne.'**') call abort write(l,fmt=fmt)nan if (l.ne.'**') call abort ! check a field width = 3 fmt = '(F3.0)' write(l,fmt=fmt)pos_inf if (l.ne.'Inf') call abort write(l,fmt=fmt)neg_inf if (l.ne.'***') call abort write(l,fmt=fmt)nan if (l.ne.'NaN') call abort ! check a field width > 3 fmt = '(F4.0)' write(l,fmt=fmt)pos_inf if (l.ne.' Inf') call abort write(l,fmt=fmt)neg_inf if (l.ne.'-Inf') call abort write(l,fmt=fmt)nan if (l.ne.' NaN') call abort ! check a field width = 7 fmt = '(F7.0)' write(l,fmt=fmt)pos_inf if (l.ne.' Inf') call abort write(l,fmt=fmt)neg_inf if (l.ne.' -Inf') call abort write(l,fmt=fmt)nan if (l.ne.' NaN') call abort ! check a field width = 8 fmt = '(F8.0)' write(l,fmt=fmt)pos_inf if (l.ne.'Infinity') call abort write(l,fmt=fmt)neg_inf if (l.ne.' -Inf') call abort write(l,fmt=fmt)nan if (l.ne.' NaN') call abort ! check a field width = 9 fmt = '(F9.0)' write(l,fmt=fmt)pos_inf if (l.ne.' Infinity') call abort write(l,fmt=fmt)neg_inf if (l.ne.'-Infinity') call abort write(l,fmt=fmt)nan if (l.ne.' NaN') call abort ! check a field width = 14 fmt = '(F14.0)' write(l,fmt=fmt)pos_inf if (l.ne.' Infinity') call abort write(l,fmt=fmt)neg_inf if (l.ne.' -Infinity') call abort write(l,fmt=fmt)nan if (l.ne.' NaN') call abort end
gpl-2.0
AscendG630-DEV/android_kernel_g630u20
fs/sysv/inode.c
162
9932
/*
 *  linux/fs/sysv/inode.c
 *
 *  minix/inode.c
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  xenix/inode.c
 *  Copyright (C) 1992  Doug Evans
 *
 *  coh/inode.c
 *  Copyright (C) 1993  Pascal Haible, Bruno Haible
 *
 *  sysv/inode.c
 *  Copyright (C) 1993  Paul B. Monday
 *
 *  sysv/inode.c
 *  Copyright (C) 1993  Bruno Haible
 *  Copyright (C) 1997, 1998  Krzysztof G. Baranowski
 *
 *  This file contains code for allocating/freeing inodes and for read/writing
 *  the superblock.
 */

#include <linux/highuid.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/namei.h>
#include <asm/byteorder.h>
#include "sysv.h"

/*
 * Write the superblock time stamp (and, for SYSV4, the clean-state
 * cookie) back to disk.  Always succeeds.
 */
static int sysv_sync_fs(struct super_block *sb, int wait)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	unsigned long time = get_seconds(), old_time;

	lock_super(sb);

	/*
	 * If we are going to write out the super block,
	 * then attach current time stamp.
	 * But if the filesystem was marked clean, keep it clean.
	 */
	sb->s_dirt = 0;
	old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
	if (sbi->s_type == FSTYPE_SYSV4) {
		if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time))
			*sbi->s_sb_state = cpu_to_fs32(sbi, 0x7c269d38 - time);
		*sbi->s_sb_time = cpu_to_fs32(sbi, time);
		mark_buffer_dirty(sbi->s_bh2);
	}

	unlock_super(sb);

	return 0;
}

/* Sync the superblock unless the fs is mounted read-only. */
static void sysv_write_super(struct super_block *sb)
{
	if (!(sb->s_flags & MS_RDONLY))
		sysv_sync_fs(sb, 1);
	else
		sb->s_dirt = 0;
}

/*
 * Remount: force read-only if the superblock demanded it at mount
 * time, and flush the superblock when going read-only.
 */
static int sysv_remount(struct super_block *sb, int *flags, char *data)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);

	lock_super(sb);
	if (sbi->s_forced_ro)
		*flags |= MS_RDONLY;
	if (*flags & MS_RDONLY)
		sysv_write_super(sb);
	unlock_super(sb);
	return 0;
}

/* Tear down the superblock: flush it, release its buffers, free sbi. */
static void sysv_put_super(struct super_block *sb)
{
	struct sysv_sb_info *sbi = SYSV_SB(sb);

	if (sb->s_dirt)
		sysv_write_super(sb);

	if (!(sb->s_flags & MS_RDONLY)) {
		/* XXX ext2 also updates the state here */
		mark_buffer_dirty(sbi->s_bh1);
		if (sbi->s_bh1 != sbi->s_bh2)
			mark_buffer_dirty(sbi->s_bh2);
	}

	brelse(sbi->s_bh1);
	if (sbi->s_bh1 != sbi->s_bh2)
		brelse(sbi->s_bh2);

	kfree(sbi);
}

/* Fill in statfs(2) data from the in-core superblock counters. */
static int sysv_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct sysv_sb_info *sbi = SYSV_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->s_ndatazones;
	buf->f_bavail = buf->f_bfree = sysv_count_free_blocks(sb);
	buf->f_files = sbi->s_ninodes;
	buf->f_ffree = sysv_count_free_inodes(sb);
	buf->f_namelen = SYSV_NAMELEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	return 0;
}

/* 
 * NXI <-> N0XI for PDP, XIN <-> XIN0 for le32, NIX <-> 0NIX for be32
 *
 * Expand an on-disk 3-byte block number into a 4-byte in-core value
 * according to the filesystem's byte sex.
 */
static inline void read3byte(struct sysv_sb_info *sbi,
	unsigned char * from, unsigned char * to)
{
	if (sbi->s_bytesex == BYTESEX_PDP) {
		to[0] = from[0];
		to[1] = 0;
		to[2] = from[1];
		to[3] = from[2];
	} else if (sbi->s_bytesex == BYTESEX_LE) {
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
		to[3] = 0;
	} else {
		to[0] = 0;
		to[1] = from[0];
		to[2] = from[1];
		to[3] = from[2];
	}
}

/* Inverse of read3byte: compress a 4-byte value back to 3 on-disk bytes. */
static inline void write3byte(struct sysv_sb_info *sbi,
	unsigned char * from, unsigned char * to)
{
	if (sbi->s_bytesex == BYTESEX_PDP) {
		to[0] = from[0];
		to[1] = from[2];
		to[2] = from[3];
	} else if (sbi->s_bytesex == BYTESEX_LE) {
		to[0] = from[0];
		to[1] = from[1];
		to[2] = from[2];
	} else {
		to[0] = from[1];
		to[1] = from[2];
		to[2] = from[3];
	}
}

static const struct inode_operations sysv_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.getattr	= sysv_getattr,
};

/*
 * Install the correct inode/file/address-space operations for the
 * inode's type.  A symlink with no blocks is a "fast" symlink whose
 * target lives inside the inode itself.
 */
void sysv_set_inode(struct inode *inode, dev_t rdev)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &sysv_file_inode_operations;
		inode->i_fop = &sysv_file_operations;
		inode->i_mapping->a_ops = &sysv_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &sysv_dir_inode_operations;
		inode->i_fop = &sysv_dir_operations;
		inode->i_mapping->a_ops = &sysv_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (inode->i_blocks) {
			inode->i_op = &sysv_symlink_inode_operations;
			inode->i_mapping->a_ops = &sysv_aops;
		} else {
			inode->i_op = &sysv_fast_symlink_inode_operations;
			nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size,
				sizeof(SYSV_I(inode)->i_data) - 1);
		}
	} else
		init_special_inode(inode, inode->i_mode, rdev);
}

/*
 * Read inode 'ino' from disk into a new in-core inode, converting all
 * on-disk fields to CPU byte order.  Returns an ERR_PTR on failure.
 */
struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
{
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	struct sysv_inode_info * si;
	struct inode *inode;
	unsigned int block;

	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %d is out of range\n",
		       sb->s_id, ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode) {
		printk("Major problem: unable to read inode from dev %s\n",
		       inode->i_sb->s_id);
		goto bad_inode;
	}
	/* SystemV FS: kludge permissions if ino==SYSV_ROOT_INO ?? */
	inode->i_mode = fs16_to_cpu(sbi, raw_inode->i_mode);
	inode->i_uid = (uid_t)fs16_to_cpu(sbi, raw_inode->i_uid);
	inode->i_gid = (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid);
	set_nlink(inode, fs16_to_cpu(sbi, raw_inode->i_nlink));
	inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
	inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_mtime);
	inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_ctime);
	inode->i_ctime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_blocks = 0;

	si = SYSV_I(inode);
	/* 10 direct + 1 indirect + 1 double + 1 triple block pointers. */
	for (block = 0; block < 10+1+1+1; block++)
		read3byte(sbi, &raw_inode->i_data[3*block],
				(u8 *)&si->i_data[block]);
	brelse(bh);
	si->i_dir_start_lookup = 0;
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		sysv_set_inode(inode,
			       old_decode_dev(fs32_to_cpu(sbi, si->i_data[0])));
	else
		sysv_set_inode(inode, 0);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}

/*
 * Write an in-core inode back to its on-disk slot, converting to disk
 * byte order.  If 'wait', synchronously flush the buffer and report
 * I/O errors.  Returns 0 or -EIO.
 */
static int __sysv_write_inode(struct inode *inode, int wait)
{
	struct super_block * sb = inode->i_sb;
	struct sysv_sb_info * sbi = SYSV_SB(sb);
	struct buffer_head * bh;
	struct sysv_inode * raw_inode;
	struct sysv_inode_info * si;
	unsigned int ino, block;
	int err = 0;

	ino = inode->i_ino;
	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %d is out of range\n",
		       inode->i_sb->s_id, ino);
		return -EIO;
	}
	raw_inode = sysv_raw_inode(sb, ino, &bh);
	if (!raw_inode) {
		printk("unable to read i-node block\n");
		return -EIO;
	}

	raw_inode->i_mode = cpu_to_fs16(sbi, inode->i_mode);
	raw_inode->i_uid = cpu_to_fs16(sbi, fs_high2lowuid(inode->i_uid));
	raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(inode->i_gid));
	raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
	raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
	raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
	raw_inode->i_mtime = cpu_to_fs32(sbi, inode->i_mtime.tv_sec);
	raw_inode->i_ctime = cpu_to_fs32(sbi, inode->i_ctime.tv_sec);

	si = SYSV_I(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		si->i_data[0] = cpu_to_fs32(sbi, old_encode_dev(inode->i_rdev));
	for (block = 0; block < 10+1+1+1; block++)
		write3byte(sbi, (u8 *)&si->i_data[block],
			&raw_inode->i_data[3*block]);
	mark_buffer_dirty(bh);
	if (wait) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk ("IO error syncing sysv inode [%s:%08x]\n",
				sb->s_id, ino);
			err = -EIO;
		}
	}
	brelse(bh);
	/*
	 * Bug fix: previously this returned 0 unconditionally, so a
	 * failed synchronous flush (err == -EIO above) was silently
	 * swallowed.  Propagate the error to the caller.
	 */
	return err;
}

int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return __sysv_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

/* Synchronous variant used by fsync paths. */
int sysv_sync_inode(struct inode *inode)
{
	return __sysv_write_inode(inode, 1);
}

/*
 * Final iput: truncate the data of unlinked inodes and free the
 * on-disk inode once the in-core state is torn down.
 */
static void sysv_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	if (!inode->i_nlink) {
		inode->i_size = 0;
		sysv_truncate(inode);
	}
	invalidate_inode_buffers(inode);
	end_writeback(inode);
	if (!inode->i_nlink)
		sysv_free_inode(inode);
}

static struct kmem_cache *sysv_inode_cachep;

/* Allocate a sysv inode from the slab cache. */
static struct inode *sysv_alloc_inode(struct super_block *sb)
{
	struct sysv_inode_info *si;

	si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL);
	if (!si)
		return NULL;
	return &si->vfs_inode;
}

/* RCU callback that actually frees the inode memory. */
static void sysv_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(sysv_inode_cachep, SYSV_I(inode));
}

static void sysv_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, sysv_i_callback);
}

/* Slab constructor: run once per object when the cache page is created. */
static void init_once(void *p)
{
	struct sysv_inode_info *si = (struct sysv_inode_info *)p;

	inode_init_once(&si->vfs_inode);
}

const struct super_operations sysv_sops = {
	.alloc_inode	= sysv_alloc_inode,
	.destroy_inode	= sysv_destroy_inode,
	.write_inode	= sysv_write_inode,
	.evict_inode	= sysv_evict_inode,
	.put_super	= sysv_put_super,
	.write_super	= sysv_write_super,
	.sync_fs	= sysv_sync_fs,
	.remount_fs	= sysv_remount,
	.statfs		= sysv_statfs,
};

/* Create the inode slab cache at module init. */
int __init sysv_init_icache(void)
{
	sysv_inode_cachep = kmem_cache_create("sysv_inode_cache",
			sizeof(struct sysv_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			init_once);
	if (!sysv_inode_cachep)
		return -ENOMEM;
	return 0;
}

void sysv_destroy_icache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(sysv_inode_cachep);
}
gpl-2.0
bradfa/linux
net/sched/sch_prio.c
162
8091
/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *              Init --  EINVAL when opt undefined
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* Per-qdisc private state: band count, classifier chain, the
 * priority->band map, and one child qdisc per band. */
struct prio_sched_data {
	int bands;
	struct tcf_proto __rcu *filter_list;
	u8  prio2band[TC_PRIO_MAX+1];
	struct Qdisc *queues[TCQ_PRIO_BANDS];
};

/*
 * Select the child qdisc for a packet: first by an attached tc filter,
 * otherwise by skb->priority via the prio2band map.  May return NULL
 * (packet dropped/stolen) with *qerr describing the outcome.
 */
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tc_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}

/* Enqueue into the band chosen by prio_classify, updating stats. */
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}

/* Peek at the head packet of the highest-priority non-empty band. */
static struct sk_buff *prio_peek(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc->ops->peek(qdisc);
		if (skb)
			return skb;
	}
	return NULL;
}

/* Strict-priority dequeue: band 0 is always drained before band 1, etc. */
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;

}

/* Reset all child qdiscs and clear our own counters. */
static void
prio_reset(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	for (prio = 0; prio < q->bands; prio++)
		qdisc_reset(q->queues[prio]);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

/* Destroy the filter chain and every child qdisc. */
static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_destroy(q->queues[prio]);
}

/*
 * (Re)configure the qdisc from netlink options: validate the band
 * count and priomap, pre-allocate any new per-band child qdiscs
 * before committing, then swap the configuration under sch_tree_lock.
 */
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1));
		if (!queues[i]) {
			while (i > oldbands)
				qdisc_destroy(queues[--i]);
			return -ENOMEM;
		}
	}

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	/* Dropped bands: drain their backlog accounting, then destroy. */
	for (i = q->bands; i < oldbands; i++) {
		struct Qdisc *child = q->queues[i];

		qdisc_tree_reduce_backlog(child, child->q.qlen,
					  child->qstats.backlog);
		qdisc_destroy(child);
	}

	for (i = oldbands; i < q->bands; i++)
		q->queues[i] = queues[i];

	sch_tree_unlock(sch);
	return 0;
}

static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
	if (!opt)
		return -EINVAL;

	return prio_tune(sch, opt);
}

/* Dump the current band count and priomap back to userspace. */
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/* Replace the child qdisc of band 'arg-1' with 'new' (noop if NULL). */
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}

static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

/* Class ids are 1-based band numbers; 0 means "no such class". */
static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_get(sch, classid);
}


static void prio_put(struct Qdisc *q, unsigned long cl)
{
}

static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}

static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}

/* Walk all bands, honoring the walker's skip/count/stop protocol. */
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

/* Filters attach only at the qdisc level (cl == 0), not per band. */
static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch,
					      unsigned long cl)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.get		=	prio_get,
	.put		=	prio_put,
	.walk		=	prio_walk,
	.tcf_chain	=	prio_find_tcf,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_put,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};

static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};

static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}

static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}

module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");
gpl-2.0
gototem/kernel
arch/powerpc/platforms/powernv/pci-ioda.c
162
34497
/* * Support PCI/PCIe on PowerNV platforms * * Copyright 2011 Benjamin Herrenschmidt, IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/msi.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/msi_bitmap.h> #include <asm/ppc-pci.h> #include <asm/opal.h> #include <asm/iommu.h> #include <asm/tce.h> #include <asm/xics.h> #include "powernv.h" #include "pci.h" #define define_pe_printk_level(func, kern_level) \ static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \ { \ struct va_format vaf; \ va_list args; \ char pfix[32]; \ int r; \ \ va_start(args, fmt); \ \ vaf.fmt = fmt; \ vaf.va = &args; \ \ if (pe->pdev) \ strlcpy(pfix, dev_name(&pe->pdev->dev), \ sizeof(pfix)); \ else \ sprintf(pfix, "%04x:%02x ", \ pci_domain_nr(pe->pbus), \ pe->pbus->number); \ r = printk(kern_level "pci %s: [PE# %.3d] %pV", \ pfix, pe->pe_number, &vaf); \ \ va_end(args); \ \ return r; \ } \ define_pe_printk_level(pe_err, KERN_ERR); define_pe_printk_level(pe_warn, KERN_WARNING); define_pe_printk_level(pe_info, KERN_INFO); static int pnv_ioda_alloc_pe(struct pnv_phb *phb) { unsigned long pe; do { pe = find_next_zero_bit(phb->ioda.pe_alloc, phb->ioda.total_pe, 0); if (pe >= phb->ioda.total_pe) return IODA_INVALID_PE; } while(test_and_set_bit(pe, phb->ioda.pe_alloc)); phb->ioda.pe_array[pe].phb = phb; phb->ioda.pe_array[pe].pe_number = pe; return pe; } static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe) { WARN_ON(phb->ioda.pe_array[pe].pdev); 
memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe)); clear_bit(pe, phb->ioda.pe_alloc); } /* Currently those 2 are only used when MSIs are enabled, this will change * but in the meantime, we need to protect them to avoid warnings */ #ifdef CONFIG_PCI_MSI static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; struct pci_dn *pdn = pci_get_pdn(dev); if (!pdn) return NULL; if (pdn->pe_number == IODA_INVALID_PE) return NULL; return &phb->ioda.pe_array[pdn->pe_number]; } #endif /* CONFIG_PCI_MSI */ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { struct pci_dev *parent; uint8_t bcomp, dcomp, fcomp; long rc, rid_end, rid; /* Bus validation ? */ if (pe->pbus) { int count; dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; parent = pe->pbus->self; if (pe->flags & PNV_IODA_PE_BUS_ALL) count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; else count = 1; switch(count) { case 1: bcomp = OpalPciBusAll; break; case 2: bcomp = OpalPciBus7Bits; break; case 4: bcomp = OpalPciBus6Bits; break; case 8: bcomp = OpalPciBus5Bits; break; case 16: bcomp = OpalPciBus4Bits; break; case 32: bcomp = OpalPciBus3Bits; break; default: pr_err("%s: Number of subordinate busses %d" " unsupported\n", pci_name(pe->pbus->self), count); /* Do an exact match only */ bcomp = OpalPciBusAll; } rid_end = pe->rid + (count << 8); } else { parent = pe->pdev->bus->self; bcomp = OpalPciBusAll; dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER; fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER; rid_end = pe->rid + 1; } /* Associate PE in PELT */ rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, bcomp, dcomp, fcomp, OPAL_MAP_PE); if (rc) { pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); return -ENXIO; } opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); /* Add to all parents PELT-V */ 
while (parent) { struct pci_dn *pdn = pci_get_pdn(parent); if (pdn && pdn->pe_number != IODA_INVALID_PE) { rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, pe->pe_number, OPAL_ADD_PE_TO_DOMAIN); /* XXX What to do in case of error ? */ } parent = parent->bus->self; } /* Setup reverse map */ for (rid = pe->rid; rid < rid_end; rid++) phb->ioda.pe_rmap[rid] = pe->pe_number; /* Setup one MVTs on IODA1 */ if (phb->type == PNV_PHB_IODA1) { pe->mve_number = pe->pe_number; rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number); if (rc) { pe_err(pe, "OPAL error %ld setting up MVE %d\n", rc, pe->mve_number); pe->mve_number = -1; } else { rc = opal_pci_set_mve_enable(phb->opal_id, pe->mve_number, OPAL_ENABLE_MVE); if (rc) { pe_err(pe, "OPAL error %ld enabling MVE %d\n", rc, pe->mve_number); pe->mve_number = -1; } } } else if (phb->type == PNV_PHB_IODA2) pe->mve_number = 0; return 0; } static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { struct pnv_ioda_pe *lpe; list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) { if (lpe->dma_weight < pe->dma_weight) { list_add_tail(&pe->dma_link, &lpe->dma_link); return; } } list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list); } static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev) { /* This is quite simplistic. The "base" weight of a device * is 10. 0 means no DMA is to be accounted for it. 
*/ /* If it's a bridge, no DMA */ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) return 0; /* Reduce the weight of slow USB controllers */ if (dev->class == PCI_CLASS_SERIAL_USB_UHCI || dev->class == PCI_CLASS_SERIAL_USB_OHCI || dev->class == PCI_CLASS_SERIAL_USB_EHCI) return 3; /* Increase the weight of RAID (includes Obsidian) */ if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID) return 15; /* Default */ return 10; } #if 0 static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; struct pci_dn *pdn = pci_get_pdn(dev); struct pnv_ioda_pe *pe; int pe_num; if (!pdn) { pr_err("%s: Device tree node not associated properly\n", pci_name(dev)); return NULL; } if (pdn->pe_number != IODA_INVALID_PE) return NULL; /* PE#0 has been pre-set */ if (dev->bus->number == 0) pe_num = 0; else pe_num = pnv_ioda_alloc_pe(phb); if (pe_num == IODA_INVALID_PE) { pr_warning("%s: Not enough PE# available, disabling device\n", pci_name(dev)); return NULL; } /* NOTE: We get only one ref to the pci_dev for the pdn, not for the * pointer in the PE data structure, both should be destroyed at the * same time. However, this needs to be looked at more closely again * once we actually start removing things (Hotplug, SR-IOV, ...) * * At some point we want to remove the PDN completely anyways */ pe = &phb->ioda.pe_array[pe_num]; pci_dev_get(dev); pdn->pcidev = dev; pdn->pe_number = pe_num; pe->pdev = dev; pe->pbus = NULL; pe->tce32_seg = -1; pe->mve_number = -1; pe->rid = dev->bus->number << 8 | pdn->devfn; pe_info(pe, "Associated device to PE\n"); if (pnv_ioda_configure_pe(phb, pe)) { /* XXX What do we do here ? 
*/ if (pe_num) pnv_ioda_free_pe(phb, pe_num); pdn->pe_number = IODA_INVALID_PE; pe->pdev = NULL; pci_dev_put(dev); return NULL; } /* Assign a DMA weight to the device */ pe->dma_weight = pnv_ioda_dma_weight(dev); if (pe->dma_weight != 0) { phb->ioda.dma_weight += pe->dma_weight; phb->ioda.dma_pe_count++; } /* Link the PE */ pnv_ioda_link_pe_by_weight(phb, pe); return pe; } #endif /* Useful for SRIOV case */ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { struct pci_dn *pdn = pci_get_pdn(dev); if (pdn == NULL) { pr_warn("%s: No device node associated with device !\n", pci_name(dev)); continue; } pci_dev_get(dev); pdn->pcidev = dev; pdn->pe_number = pe->pe_number; pe->dma_weight += pnv_ioda_dma_weight(dev); if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) pnv_ioda_setup_same_PE(dev->subordinate, pe); } } /* * There're 2 types of PCI bus sensitive PEs: One that is compromised of * single PCI bus. Another one that contains the primary PCI bus and its * subordinate PCI devices and buses. The second type of PE is normally * orgiriated by PCIe-to-PCI bridge or PLX switch downstream ports. */ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all) { struct pci_controller *hose = pci_bus_to_host(bus); struct pnv_phb *phb = hose->private_data; struct pnv_ioda_pe *pe; int pe_num; pe_num = pnv_ioda_alloc_pe(phb); if (pe_num == IODA_INVALID_PE) { pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n", __func__, pci_domain_nr(bus), bus->number); return; } pe = &phb->ioda.pe_array[pe_num]; pe->flags = (all ? 
PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS); pe->pbus = bus; pe->pdev = NULL; pe->tce32_seg = -1; pe->mve_number = -1; pe->rid = bus->busn_res.start << 8; pe->dma_weight = 0; if (all) pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n", bus->busn_res.start, bus->busn_res.end, pe_num); else pe_info(pe, "Secondary bus %d associated with PE#%d\n", bus->busn_res.start, pe_num); if (pnv_ioda_configure_pe(phb, pe)) { /* XXX What do we do here ? */ if (pe_num) pnv_ioda_free_pe(phb, pe_num); pe->pbus = NULL; return; } /* Associate it with all child devices */ pnv_ioda_setup_same_PE(bus, pe); /* Put PE to the list */ list_add_tail(&pe->list, &phb->ioda.pe_list); /* Account for one DMA PE if at least one DMA capable device exist * below the bridge */ if (pe->dma_weight != 0) { phb->ioda.dma_weight += pe->dma_weight; phb->ioda.dma_pe_count++; } /* Link the PE */ pnv_ioda_link_pe_by_weight(phb, pe); } static void pnv_ioda_setup_PEs(struct pci_bus *bus) { struct pci_dev *dev; pnv_ioda_setup_bus_PE(bus, 0); list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->subordinate) { if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) pnv_ioda_setup_bus_PE(dev->subordinate, 1); else pnv_ioda_setup_PEs(dev->subordinate); } } } /* * Configure PEs so that the downstream PCI buses and devices * could have their associated PE#. Unfortunately, we didn't * figure out the way to identify the PLX bridge yet. So we * simply put the PCI bus and the subordinate behind the root * port to PE# here. The game rule here is expected to be changed * as soon as we can detected PLX bridge correctly. */ static void pnv_pci_ioda_setup_PEs(void) { struct pci_controller *hose, *tmp; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { pnv_ioda_setup_PEs(hose->bus); } } static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev) { struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; /* * The function can be called while the PE# * hasn't been assigned. 
Do nothing for the * case. */ if (!pdn || pdn->pe_number == IODA_INVALID_PE) return; pe = &phb->ioda.pe_array[pdn->pe_number]; set_iommu_table_base(&pdev->dev, &pe->tce32_table); } static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { set_iommu_table_base(&dev->dev, &pe->tce32_table); if (dev->subordinate) pnv_ioda_setup_bus_dma(pe, dev->subordinate); } } static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl, u64 *startp, u64 *endp) { u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; unsigned long start, end, inc; start = __pa(startp); end = __pa(endp); /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */ if (tbl->it_busno) { start <<= 12; end <<= 12; inc = 128 << 12; start |= tbl->it_busno; end |= tbl->it_busno; } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) { /* p7ioc-style invalidation, 2 TCEs per write */ start |= (1ull << 63); end |= (1ull << 63); inc = 16; } else { /* Default (older HW) */ inc = 128; } end |= inc - 1; /* round up end to be different than start */ mb(); /* Ensure above stores are visible */ while (start <= end) { __raw_writeq(start, invalidate); start += inc; } /* * The iommu layer will do another mb() for us on build() * and we don't care on free() */ } static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe, struct iommu_table *tbl, u64 *startp, u64 *endp) { unsigned long start, end, inc; u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; /* We'll invalidate DMA address in PE scope */ start = 0x2ul << 60; start |= (pe->pe_number & 0xFF); end = start; /* Figure out the start, end and step */ inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64)); start |= (inc << 12); inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64)); end |= (inc << 12); inc = (0x1ul << 12); mb(); while (start <= end) { __raw_writeq(start, invalidate); start += inc; } } void 
pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, u64 *startp, u64 *endp) { struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, tce32_table); struct pnv_phb *phb = pe->phb; if (phb->type == PNV_PHB_IODA1) pnv_pci_ioda1_tce_invalidate(tbl, startp, endp); else pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp); } static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe, unsigned int base, unsigned int segs) { struct page *tce_mem = NULL; const __be64 *swinvp; struct iommu_table *tbl; unsigned int i; int64_t rc; void *addr; /* 256M DMA window, 4K TCE pages, 8 bytes TCE */ #define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8) /* XXX FIXME: Handle 64-bit only DMA devices */ /* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */ /* XXX FIXME: Allocate multi-level tables on PHB3 */ /* We shouldn't already have a 32-bit DMA associated */ if (WARN_ON(pe->tce32_seg >= 0)) return; /* Grab a 32-bit TCE table */ pe->tce32_seg = base; pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n", (base << 28), ((base + segs) << 28) - 1); /* XXX Currently, we allocate one big contiguous table for the * TCEs. 
We only really need one chunk per 256M of TCE space * (ie per segment) but that's an optimization for later, it * requires some added smarts with our get/put_tce implementation */ tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL, get_order(TCE32_TABLE_SIZE * segs)); if (!tce_mem) { pe_err(pe, " Failed to allocate a 32-bit TCE memory\n"); goto fail; } addr = page_address(tce_mem); memset(addr, 0, TCE32_TABLE_SIZE * segs); /* Configure HW */ for (i = 0; i < segs; i++) { rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, base + i, 1, __pa(addr) + TCE32_TABLE_SIZE * i, TCE32_TABLE_SIZE, 0x1000); if (rc) { pe_err(pe, " Failed to configure 32-bit TCE table," " err %ld\n", rc); goto fail; } } /* Setup linux iommu table */ tbl = &pe->tce32_table; pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs, base << 28); /* OPAL variant of P7IOC SW invalidated TCEs */ swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL); if (swinvp) { /* We need a couple more fields -- an address and a data * to or. Since the bus is only printed out on table free * errors, and on the first pass the data will be a relative * bus number, print that out instead. */ tbl->it_busno = 0; tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8); tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE | TCE_PCI_SWINV_PAIR; } iommu_init_table(tbl, phb->hose->node); if (pe->pdev) set_iommu_table_base(&pe->pdev->dev, tbl); else pnv_ioda_setup_bus_dma(pe, pe->pbus); return; fail: /* XXX Failure: Try to fallback to 64-bit only ? 
*/ if (pe->tce32_seg >= 0) pe->tce32_seg = -1; if (tce_mem) __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); } static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { struct page *tce_mem = NULL; void *addr; const __be64 *swinvp; struct iommu_table *tbl; unsigned int tce_table_size, end; int64_t rc; /* We shouldn't already have a 32-bit DMA associated */ if (WARN_ON(pe->tce32_seg >= 0)) return; /* The PE will reserve all possible 32-bits space */ pe->tce32_seg = 0; end = (1 << ilog2(phb->ioda.m32_pci_base)); tce_table_size = (end / 0x1000) * 8; pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n", end); /* Allocate TCE table */ tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL, get_order(tce_table_size)); if (!tce_mem) { pe_err(pe, "Failed to allocate a 32-bit TCE memory\n"); goto fail; } addr = page_address(tce_mem); memset(addr, 0, tce_table_size); /* * Map TCE table through TVT. The TVE index is the PE number * shifted by 1 bit for 32-bits DMA space. */ rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, pe->pe_number << 1, 1, __pa(addr), tce_table_size, 0x1000); if (rc) { pe_err(pe, "Failed to configure 32-bit TCE table," " err %ld\n", rc); goto fail; } /* Setup linux iommu table */ tbl = &pe->tce32_table; pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0); /* OPAL variant of PHB3 invalidated TCEs */ swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL); if (swinvp) { /* We need a couple more fields -- an address and a data * to or. Since the bus is only printed out on table free * errors, and on the first pass the data will be a relative * bus number, print that out instead. 
*/ tbl->it_busno = 0; tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8); tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; } iommu_init_table(tbl, phb->hose->node); if (pe->pdev) set_iommu_table_base(&pe->pdev->dev, tbl); else pnv_ioda_setup_bus_dma(pe, pe->pbus); return; fail: if (pe->tce32_seg >= 0) pe->tce32_seg = -1; if (tce_mem) __free_pages(tce_mem, get_order(tce_table_size)); } static void pnv_ioda_setup_dma(struct pnv_phb *phb) { struct pci_controller *hose = phb->hose; unsigned int residual, remaining, segs, tw, base; struct pnv_ioda_pe *pe; /* If we have more PE# than segments available, hand out one * per PE until we run out and let the rest fail. If not, * then we assign at least one segment per PE, plus more based * on the amount of devices under that PE */ if (phb->ioda.dma_pe_count > phb->ioda.tce32_count) residual = 0; else residual = phb->ioda.tce32_count - phb->ioda.dma_pe_count; pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n", hose->global_number, phb->ioda.tce32_count); pr_info("PCI: %d PE# for a total weight of %d\n", phb->ioda.dma_pe_count, phb->ioda.dma_weight); /* Walk our PE list and configure their DMA segments, hand them * out one base segment plus any residual segments based on * weight */ remaining = phb->ioda.tce32_count; tw = phb->ioda.dma_weight; base = 0; list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) { if (!pe->dma_weight) continue; if (!remaining) { pe_warn(pe, "No DMA32 resources available\n"); continue; } segs = 1; if (residual) { segs += ((pe->dma_weight * residual) + (tw / 2)) / tw; if (segs > remaining) segs = remaining; } /* * For IODA2 compliant PHB3, we needn't care about the weight. * The all available 32-bits DMA space will be assigned to * the specific PE. 
*/ if (phb->type == PNV_PHB_IODA1) { pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n", pe->dma_weight, segs); pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs); } else { pe_info(pe, "Assign DMA32 space\n"); segs = 0; pnv_pci_ioda2_setup_dma_pe(phb, pe); } remaining -= segs; base += segs; } } #ifdef CONFIG_PCI_MSI static void pnv_ioda2_msi_eoi(struct irq_data *d) { unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); struct irq_chip *chip = irq_data_get_irq_chip(d); struct pnv_phb *phb = container_of(chip, struct pnv_phb, ioda.irq_chip); int64_t rc; rc = opal_pci_msi_eoi(phb->opal_id, hw_irq); WARN_ON_ONCE(rc); icp_native_eoi(d); } static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, unsigned int hwirq, unsigned int virq, unsigned int is_64, struct msi_msg *msg) { struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); struct pci_dn *pdn = pci_get_pdn(dev); struct irq_data *idata; struct irq_chip *ichip; unsigned int xive_num = hwirq - phb->msi_base; uint64_t addr64; uint32_t addr32, data; int rc; /* No PE assigned ? bail out ... no MSI for you ! 
*/ if (pe == NULL) return -ENXIO; /* Check if we have an MVE */ if (pe->mve_number < 0) return -ENXIO; /* Force 32-bit MSI on some broken devices */ if (pdn && pdn->force_32bit_msi) is_64 = 0; /* Assign XIVE to PE */ rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); if (rc) { pr_warn("%s: OPAL error %d setting XIVE %d PE\n", pci_name(dev), rc, xive_num); return -EIO; } if (is_64) { rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, &addr64, &data); if (rc) { pr_warn("%s: OPAL error %d getting 64-bit MSI data\n", pci_name(dev), rc); return -EIO; } msg->address_hi = addr64 >> 32; msg->address_lo = addr64 & 0xfffffffful; } else { rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, &addr32, &data); if (rc) { pr_warn("%s: OPAL error %d getting 32-bit MSI data\n", pci_name(dev), rc); return -EIO; } msg->address_hi = 0; msg->address_lo = addr32; } msg->data = data; /* * Change the IRQ chip for the MSI interrupts on PHB3. * The corresponding IRQ chip should be populated for * the first time. */ if (phb->type == PNV_PHB_IODA2) { if (!phb->ioda.irq_chip_init) { idata = irq_get_irq_data(virq); ichip = irq_data_get_irq_chip(idata); phb->ioda.irq_chip_init = 1; phb->ioda.irq_chip = *ichip; phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; } irq_set_chip(virq, &phb->ioda.irq_chip); } pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d)," " address=%x_%08x data=%x PE# %d\n", pci_name(dev), is_64 ? 
"64" : "32", hwirq, xive_num, msg->address_hi, msg->address_lo, data, pe->pe_number); return 0; } static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { unsigned int count; const __be32 *prop = of_get_property(phb->hose->dn, "ibm,opal-msi-ranges", NULL); if (!prop) { /* BML Fallback */ prop = of_get_property(phb->hose->dn, "msi-ranges", NULL); } if (!prop) return; phb->msi_base = be32_to_cpup(prop); count = be32_to_cpup(prop + 1); if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) { pr_err("PCI %d: Failed to allocate MSI bitmap !\n", phb->hose->global_number); return; } phb->msi_setup = pnv_pci_ioda_msi_setup; phb->msi32_support = 1; pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n", count, phb->msi_base); } #else static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { } #endif /* CONFIG_PCI_MSI */ /* * This function is supposed to be called on basis of PE from top * to bottom style. So the the I/O or MMIO segment assigned to * parent PE could be overrided by its child PEs if necessary. */ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose, struct pnv_ioda_pe *pe) { struct pnv_phb *phb = hose->private_data; struct pci_bus_region region; struct resource *res; int i, index; int rc; /* * NOTE: We only care PCI bus based PE for now. For PCI * device based PE, for example SRIOV sensitive VF should * be figured out later. 
*/ BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))); pci_bus_for_each_resource(pe->pbus, res, i) { if (!res || !res->flags || res->start > res->end) continue; if (res->flags & IORESOURCE_IO) { region.start = res->start - phb->ioda.io_pci_base; region.end = res->end - phb->ioda.io_pci_base; index = region.start / phb->ioda.io_segsize; while (index < phb->ioda.total_pe && region.start <= region.end) { phb->ioda.io_segmap[index] = pe->pe_number; rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index); if (rc != OPAL_SUCCESS) { pr_err("%s: OPAL error %d when mapping IO " "segment #%d to PE#%d\n", __func__, rc, index, pe->pe_number); break; } region.start += phb->ioda.io_segsize; index++; } } else if (res->flags & IORESOURCE_MEM) { /* WARNING: Assumes M32 is mem region 0 in PHB. We need to * harden that algorithm when we start supporting M64 */ region.start = res->start - hose->mem_offset[0] - phb->ioda.m32_pci_base; region.end = res->end - hose->mem_offset[0] - phb->ioda.m32_pci_base; index = region.start / phb->ioda.m32_segsize; while (index < phb->ioda.total_pe && region.start <= region.end) { phb->ioda.m32_segmap[index] = pe->pe_number; rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index); if (rc != OPAL_SUCCESS) { pr_err("%s: OPAL error %d when mapping M32 " "segment#%d to PE#%d", __func__, rc, index, pe->pe_number); break; } region.start += phb->ioda.m32_segsize; index++; } } } } static void pnv_pci_ioda_setup_seg(void) { struct pci_controller *tmp, *hose; struct pnv_phb *phb; struct pnv_ioda_pe *pe; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { phb = hose->private_data; list_for_each_entry(pe, &phb->ioda.pe_list, list) { pnv_ioda_setup_pe_seg(hose, pe); } } } static void pnv_pci_ioda_setup_DMA(void) { struct pci_controller *hose, *tmp; struct pnv_phb *phb; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { pnv_ioda_setup_dma(hose->private_data); /* 
Mark the PHB initialization done */ phb = hose->private_data; phb->initialized = 1; } } static void pnv_pci_ioda_fixup(void) { pnv_pci_ioda_setup_PEs(); pnv_pci_ioda_setup_seg(); pnv_pci_ioda_setup_DMA(); } /* * Returns the alignment for I/O or memory windows for P2P * bridges. That actually depends on how PEs are segmented. * For now, we return I/O or M32 segment size for PE sensitive * P2P bridges. Otherwise, the default values (4KiB for I/O, * 1MiB for memory) will be returned. * * The current PCI bus might be put into one PE, which was * create against the parent PCI bridge. For that case, we * needn't enlarge the alignment so that we can save some * resources. */ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus, unsigned long type) { struct pci_dev *bridge; struct pci_controller *hose = pci_bus_to_host(bus); struct pnv_phb *phb = hose->private_data; int num_pci_bridges = 0; bridge = bus->self; while (bridge) { if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) { num_pci_bridges++; if (num_pci_bridges >= 2) return 1; } bridge = bridge->bus->self; } /* We need support prefetchable memory window later */ if (type & IORESOURCE_MEM) return phb->ioda.m32_segsize; return phb->ioda.io_segsize; } /* Prevent enabling devices for which we couldn't properly * assign a PE */ static int pnv_pci_enable_device_hook(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; struct pci_dn *pdn; /* The function is probably called while the PEs have * not be created yet. For example, resource reassignment * during PCI probe period. We just skip the check if * PEs isn't ready. 
*/ if (!phb->initialized) return 0; pdn = pci_get_pdn(dev); if (!pdn || pdn->pe_number == IODA_INVALID_PE) return -EINVAL; return 0; } static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn) { return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; } static void pnv_pci_ioda_shutdown(struct pnv_phb *phb) { opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET); } void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) { struct pci_controller *hose; static int primary = 1; struct pnv_phb *phb; unsigned long size, m32map_off, iomap_off, pemap_off; const u64 *prop64; const u32 *prop32; u64 phb_id; void *aux; long rc; pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name); prop64 = of_get_property(np, "ibm,opal-phbid", NULL); if (!prop64) { pr_err(" Missing \"ibm,opal-phbid\" property !\n"); return; } phb_id = be64_to_cpup(prop64); pr_debug(" PHB-ID : 0x%016llx\n", phb_id); phb = alloc_bootmem(sizeof(struct pnv_phb)); if (phb) { memset(phb, 0, sizeof(struct pnv_phb)); phb->hose = hose = pcibios_alloc_controller(np); } if (!phb || !phb->hose) { pr_err("PCI: Failed to allocate PCI controller for %s\n", np->full_name); return; } spin_lock_init(&phb->lock); /* XXX Use device-tree */ hose->first_busno = 0; hose->last_busno = 0xff; hose->private_data = phb; phb->opal_id = phb_id; phb->type = ioda_type; /* Detect specific models for error handling */ if (of_device_is_compatible(np, "ibm,p7ioc-pciex")) phb->model = PNV_PHB_MODEL_P7IOC; else if (of_device_is_compatible(np, "ibm,power8-pciex")) phb->model = PNV_PHB_MODEL_PHB3; else phb->model = PNV_PHB_MODEL_UNKNOWN; /* Parse 32-bit and IO ranges (if any) */ pci_process_bridge_OF_ranges(phb->hose, np, primary); primary = 0; /* Get registers */ phb->regs = of_iomap(np, 0); if (phb->regs == NULL) pr_err(" Failed to map registers !\n"); /* Initialize more IODA stuff */ prop32 = of_get_property(np, "ibm,opal-num-pes", NULL); if (!prop32) 
phb->ioda.total_pe = 1; else phb->ioda.total_pe = *prop32; phb->ioda.m32_size = resource_size(&hose->mem_resources[0]); /* FW Has already off top 64k of M32 space (MSI space) */ phb->ioda.m32_size += 0x10000; phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe; phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0]; phb->ioda.io_size = hose->pci_io_size; phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe; phb->ioda.io_pci_base = 0; /* XXX calculate this ? */ /* Allocate aux data & arrays * * XXX TODO: Don't allocate io segmap on PHB3 */ size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long)); m32map_off = size; size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]); iomap_off = size; size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]); pemap_off = size; size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe); aux = alloc_bootmem(size); memset(aux, 0, size); phb->ioda.pe_alloc = aux; phb->ioda.m32_segmap = aux + m32map_off; phb->ioda.io_segmap = aux + iomap_off; phb->ioda.pe_array = aux + pemap_off; set_bit(0, phb->ioda.pe_alloc); INIT_LIST_HEAD(&phb->ioda.pe_dma_list); INIT_LIST_HEAD(&phb->ioda.pe_list); /* Calculate how many 32-bit TCE segments we have */ phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28; /* Clear unusable m64 */ hose->mem_resources[1].flags = 0; hose->mem_resources[1].start = 0; hose->mem_resources[1].end = 0; hose->mem_resources[2].flags = 0; hose->mem_resources[2].start = 0; hose->mem_resources[2].end = 0; #if 0 /* We should really do that ... 
*/ rc = opal_pci_set_phb_mem_window(opal->phb_id, window_type, window_num, starting_real_address, starting_pci_address, segment_size); #endif pr_info(" %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n", phb->ioda.total_pe, phb->ioda.m32_size, phb->ioda.m32_segsize, phb->ioda.io_size, phb->ioda.io_segsize); phb->hose->ops = &pnv_pci_ops; /* Setup RID -> PE mapping function */ phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe; /* Setup TCEs */ phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; /* Setup shutdown function for kexec */ phb->shutdown = pnv_pci_ioda_shutdown; /* Setup MSI support */ pnv_pci_init_ioda_msis(phb); /* * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here * to let the PCI core do resource assignment. It's supposed * that the PCI core will do correct I/O and MMIO alignment * for the P2P bridge bars so that each PCI bus (excluding * the child P2P bridges) can form individual PE. */ ppc_md.pcibios_fixup = pnv_pci_ioda_fixup; ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook; ppc_md.pcibios_window_alignment = pnv_pci_window_alignment; pci_add_flags(PCI_REASSIGN_ALL_RSRC); /* Reset IODA tables to a clean state */ rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET); if (rc) pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc); /* * On IODA1 map everything to PE#0, on IODA2 we assume the IODA reset * has cleared the RTT which has the same effect */ if (ioda_type == PNV_PHB_IODA1) opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE); } void pnv_pci_init_ioda2_phb(struct device_node *np) { pnv_pci_init_ioda_phb(np, PNV_PHB_IODA2); } void __init pnv_pci_init_ioda_hub(struct device_node *np) { struct device_node *phbn; const u64 *prop64; u64 hub_id; pr_info("Probing IODA IO-Hub %s\n", np->full_name); prop64 = of_get_property(np, "ibm,opal-hubid", NULL); if (!prop64) { pr_err(" Missing \"ibm,opal-hubid\" property !\n"); return; } hub_id = be64_to_cpup(prop64); pr_devel(" HUB-ID : 0x%016llx\n", hub_id); /* 
Count child PHBs */ for_each_child_of_node(np, phbn) { /* Look for IODA1 PHBs */ if (of_device_is_compatible(phbn, "ibm,ioda-phb")) pnv_pci_init_ioda_phb(phbn, PNV_PHB_IODA1); } }
gpl-2.0
chruck/cpsc8220
linux-4.3.3/drivers/rtc/rtc-isl12022.c
418
7404
/* * An I2C driver for the Intersil ISL 12022 * * Author: Roman Fietze <roman.fietze@telemotive.de> * * Based on the Philips PCF8563 RTC * by Alessandro Zummo <a.zummo@towertech.it>. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. */ #include <linux/i2c.h> #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/err.h> #include <linux/of.h> #include <linux/of_device.h> #define DRV_VERSION "0.1" /* ISL register offsets */ #define ISL12022_REG_SC 0x00 #define ISL12022_REG_MN 0x01 #define ISL12022_REG_HR 0x02 #define ISL12022_REG_DT 0x03 #define ISL12022_REG_MO 0x04 #define ISL12022_REG_YR 0x05 #define ISL12022_REG_DW 0x06 #define ISL12022_REG_SR 0x07 #define ISL12022_REG_INT 0x08 /* ISL register bits */ #define ISL12022_HR_MIL (1 << 7) /* military or 24 hour time */ #define ISL12022_SR_LBAT85 (1 << 2) #define ISL12022_SR_LBAT75 (1 << 1) #define ISL12022_INT_WRTC (1 << 6) static struct i2c_driver isl12022_driver; struct isl12022 { struct rtc_device *rtc; bool write_enabled; /* true if write enable is set */ }; static int isl12022_read_regs(struct i2c_client *client, uint8_t reg, uint8_t *data, size_t n) { struct i2c_msg msgs[] = { { .addr = client->addr, .flags = 0, .len = 1, .buf = data }, /* setup read ptr */ { .addr = client->addr, .flags = I2C_M_RD, .len = n, .buf = data } }; int ret; data[0] = reg; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) { dev_err(&client->dev, "%s: read error, ret=%d\n", __func__, ret); return -EIO; } return 0; } static int isl12022_write_reg(struct i2c_client *client, uint8_t reg, uint8_t val) { uint8_t data[2] = { reg, val }; int err; err = i2c_master_send(client, data, sizeof(data)); if (err != sizeof(data)) { dev_err(&client->dev, "%s: err=%d addr=%02x, data=%02x\n", __func__, err, data[0], data[1]); 
return -EIO; } return 0; } /* * In the routines that deal directly with the isl12022 hardware, we use * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. */ static int isl12022_get_datetime(struct i2c_client *client, struct rtc_time *tm) { uint8_t buf[ISL12022_REG_INT + 1]; int ret; ret = isl12022_read_regs(client, ISL12022_REG_SC, buf, sizeof(buf)); if (ret) return ret; if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) { dev_warn(&client->dev, "voltage dropped below %u%%, " "date and time is not reliable.\n", buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 85 : 75); } dev_dbg(&client->dev, "%s: raw data is sec=%02x, min=%02x, hr=%02x, " "mday=%02x, mon=%02x, year=%02x, wday=%02x, " "sr=%02x, int=%02x", __func__, buf[ISL12022_REG_SC], buf[ISL12022_REG_MN], buf[ISL12022_REG_HR], buf[ISL12022_REG_DT], buf[ISL12022_REG_MO], buf[ISL12022_REG_YR], buf[ISL12022_REG_DW], buf[ISL12022_REG_SR], buf[ISL12022_REG_INT]); tm->tm_sec = bcd2bin(buf[ISL12022_REG_SC] & 0x7F); tm->tm_min = bcd2bin(buf[ISL12022_REG_MN] & 0x7F); tm->tm_hour = bcd2bin(buf[ISL12022_REG_HR] & 0x3F); tm->tm_mday = bcd2bin(buf[ISL12022_REG_DT] & 0x3F); tm->tm_wday = buf[ISL12022_REG_DW] & 0x07; tm->tm_mon = bcd2bin(buf[ISL12022_REG_MO] & 0x1F) - 1; tm->tm_year = bcd2bin(buf[ISL12022_REG_YR]) + 100; dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " "mday=%d, mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); return rtc_valid_tm(tm); } static int isl12022_set_datetime(struct i2c_client *client, struct rtc_time *tm) { struct isl12022 *isl12022 = i2c_get_clientdata(client); size_t i; int ret; uint8_t buf[ISL12022_REG_DW + 1]; dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, " "mday=%d, mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); if (!isl12022->write_enabled) { ret = isl12022_read_regs(client, ISL12022_REG_INT, 
buf, 1); if (ret) return ret; /* Check if WRTC (write rtc enable) is set factory default is * 0 (not set) */ if (!(buf[0] & ISL12022_INT_WRTC)) { dev_info(&client->dev, "init write enable and 24 hour format\n"); /* Set the write enable bit. */ ret = isl12022_write_reg(client, ISL12022_REG_INT, buf[0] | ISL12022_INT_WRTC); if (ret) return ret; /* Write to any RTC register to start RTC, we use the * HR register, setting the MIL bit to use the 24 hour * format. */ ret = isl12022_read_regs(client, ISL12022_REG_HR, buf, 1); if (ret) return ret; ret = isl12022_write_reg(client, ISL12022_REG_HR, buf[0] | ISL12022_HR_MIL); if (ret) return ret; } isl12022->write_enabled = 1; } /* hours, minutes and seconds */ buf[ISL12022_REG_SC] = bin2bcd(tm->tm_sec); buf[ISL12022_REG_MN] = bin2bcd(tm->tm_min); buf[ISL12022_REG_HR] = bin2bcd(tm->tm_hour) | ISL12022_HR_MIL; buf[ISL12022_REG_DT] = bin2bcd(tm->tm_mday); /* month, 1 - 12 */ buf[ISL12022_REG_MO] = bin2bcd(tm->tm_mon + 1); /* year and century */ buf[ISL12022_REG_YR] = bin2bcd(tm->tm_year % 100); buf[ISL12022_REG_DW] = tm->tm_wday & 0x07; /* write register's data */ for (i = 0; i < ARRAY_SIZE(buf); i++) { ret = isl12022_write_reg(client, ISL12022_REG_SC + i, buf[ISL12022_REG_SC + i]); if (ret) return -EIO; } return 0; } static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm) { return isl12022_get_datetime(to_i2c_client(dev), tm); } static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm) { return isl12022_set_datetime(to_i2c_client(dev), tm); } static const struct rtc_class_ops isl12022_rtc_ops = { .read_time = isl12022_rtc_read_time, .set_time = isl12022_rtc_set_time, }; static int isl12022_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct isl12022 *isl12022; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; isl12022 = devm_kzalloc(&client->dev, sizeof(struct isl12022), GFP_KERNEL); if (!isl12022) return -ENOMEM; dev_dbg(&client->dev, "chip 
found, driver version " DRV_VERSION "\n"); i2c_set_clientdata(client, isl12022); isl12022->rtc = devm_rtc_device_register(&client->dev, isl12022_driver.driver.name, &isl12022_rtc_ops, THIS_MODULE); return PTR_ERR_OR_ZERO(isl12022->rtc); } #ifdef CONFIG_OF static const struct of_device_id isl12022_dt_match[] = { { .compatible = "isl,isl12022" }, /* for backward compat., don't use */ { .compatible = "isil,isl12022" }, { }, }; MODULE_DEVICE_TABLE(of, isl12022_dt_match); #endif static const struct i2c_device_id isl12022_id[] = { { "isl12022", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, isl12022_id); static struct i2c_driver isl12022_driver = { .driver = { .name = "rtc-isl12022", #ifdef CONFIG_OF .of_match_table = of_match_ptr(isl12022_dt_match), #endif }, .probe = isl12022_probe, .id_table = isl12022_id, }; module_i2c_driver(isl12022_driver); MODULE_AUTHOR("roman.fietze@telemotive.de"); MODULE_DESCRIPTION("ISL 12022 RTC driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
steppnasty/htc-kernel-msm7x30
arch/arm/kernel/etm.c
674
13306
/* * linux/arch/arm/kernel/etm.c * * Driver for ARM's Embedded Trace Macrocell and Embedded Trace Buffer. * * Copyright (C) 2009 Nokia Corporation. * Alexander Shishkin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/io.h> #include <linux/sysrq.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/amba/bus.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <asm/hardware/coresight.h> #include <asm/sections.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexander Shishkin"); static struct tracectx tracer; static inline bool trace_isrunning(struct tracectx *t) { return !!(t->flags & TRACER_RUNNING); } static int etm_setup_address_range(struct tracectx *t, int n, unsigned long start, unsigned long end, int exclude, int data) { u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \ ETMAAT_NOVALCMP; if (n < 1 || n > t->ncmppairs) return -EINVAL; /* comparators and ranges are numbered starting with 1 as opposed * to bits in a word */ n--; if (data) flags |= ETMAAT_DLOADSTORE; else flags |= ETMAAT_IEXEC; /* first comparator for the range */ etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2)); etm_writel(t, start, ETMR_COMP_VAL(n * 2)); /* second comparator is right next to it */ etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1)); etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1)); flags = exclude ? 
ETMTE_INCLEXCL : 0; etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL); return 0; } static int trace_start(struct tracectx *t) { u32 v; unsigned long timeout = TRACER_TIMEOUT; etb_unlock(t); etb_writel(t, 0, ETBR_FORMATTERCTRL); etb_writel(t, 1, ETBR_CTRL); etb_lock(t); /* configure etm */ v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz); if (t->flags & TRACER_CYCLE_ACC) v |= ETMCTRL_CYCLEACCURATE; etm_unlock(t); etm_writel(t, v, ETMR_CTRL); while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) ; if (!timeout) { dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); etm_lock(t); return -EFAULT; } etm_setup_address_range(t, 1, (unsigned long)_stext, (unsigned long)_etext, 0, 0); etm_writel(t, 0, ETMR_TRACEENCTRL2); etm_writel(t, 0, ETMR_TRACESSCTRL); etm_writel(t, 0x6f, ETMR_TRACEENEVT); v &= ~ETMCTRL_PROGRAM; v |= ETMCTRL_PORTSEL; etm_writel(t, v, ETMR_CTRL); timeout = TRACER_TIMEOUT; while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout) ; if (!timeout) { dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n"); etm_lock(t); return -EFAULT; } etm_lock(t); t->flags |= TRACER_RUNNING; return 0; } static int trace_stop(struct tracectx *t) { unsigned long timeout = TRACER_TIMEOUT; etm_unlock(t); etm_writel(t, 0x440, ETMR_CTRL); while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) ; if (!timeout) { dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); etm_lock(t); return -EFAULT; } etm_lock(t); etb_unlock(t); etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL); timeout = TRACER_TIMEOUT; while (etb_readl(t, ETBR_FORMATTERCTRL) & ETBFF_MANUAL_FLUSH && --timeout) ; if (!timeout) { dev_dbg(t->dev, "Waiting for formatter flush to commence " "timed out\n"); etb_lock(t); return -EFAULT; } etb_writel(t, 0, ETBR_CTRL); etb_lock(t); t->flags &= ~TRACER_RUNNING; return 0; } static int etb_getdatalen(struct tracectx *t) { u32 v; int rp, wp; v = etb_readl(t, ETBR_STATUS); if (v & 1) return t->etb_bufsz; rp = 
etb_readl(t, ETBR_READADDR); wp = etb_readl(t, ETBR_WRITEADDR); if (rp > wp) { etb_writel(t, 0, ETBR_READADDR); etb_writel(t, 0, ETBR_WRITEADDR); return 0; } return wp - rp; } /* sysrq+v will always stop the running trace and leave it at that */ static void etm_dump(void) { struct tracectx *t = &tracer; u32 first = 0; int length; if (!t->etb_regs) { printk(KERN_INFO "No tracing hardware found\n"); return; } if (trace_isrunning(t)) trace_stop(t); etb_unlock(t); length = etb_getdatalen(t); if (length == t->etb_bufsz) first = etb_readl(t, ETBR_WRITEADDR); etb_writel(t, first, ETBR_READADDR); printk(KERN_INFO "Trace buffer contents length: %d\n", length); printk(KERN_INFO "--- ETB buffer begin ---\n"); for (; length; length--) printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM))); printk(KERN_INFO "\n--- ETB buffer end ---\n"); /* deassert the overflow bit */ etb_writel(t, 1, ETBR_CTRL); etb_writel(t, 0, ETBR_CTRL); etb_writel(t, 0, ETBR_TRIGGERCOUNT); etb_writel(t, 0, ETBR_READADDR); etb_writel(t, 0, ETBR_WRITEADDR); etb_lock(t); } static void sysrq_etm_dump(int key, struct tty_struct *tty) { dev_dbg(tracer.dev, "Dumping ETB buffer\n"); etm_dump(); } static struct sysrq_key_op sysrq_etm_op = { .handler = sysrq_etm_dump, .help_msg = "ETM buffer dump", .action_msg = "etm", }; static int etb_open(struct inode *inode, struct file *file) { if (!tracer.etb_regs) return -ENODEV; file->private_data = &tracer; return nonseekable_open(inode, file); } static ssize_t etb_read(struct file *file, char __user *data, size_t len, loff_t *ppos) { int total, i; long length; struct tracectx *t = file->private_data; u32 first = 0; u32 *buf; mutex_lock(&t->mutex); if (trace_isrunning(t)) { length = 0; goto out; } etb_unlock(t); total = etb_getdatalen(t); if (total == t->etb_bufsz) first = etb_readl(t, ETBR_WRITEADDR); etb_writel(t, first, ETBR_READADDR); length = min(total * 4, (int)len); buf = vmalloc(length); dev_dbg(t->dev, "ETB buffer length: %d\n", total); dev_dbg(t->dev, "ETB 
status reg: %x\n", etb_readl(t, ETBR_STATUS)); for (i = 0; i < length / 4; i++) buf[i] = etb_readl(t, ETBR_READMEM); /* the only way to deassert overflow bit in ETB status is this */ etb_writel(t, 1, ETBR_CTRL); etb_writel(t, 0, ETBR_CTRL); etb_writel(t, 0, ETBR_WRITEADDR); etb_writel(t, 0, ETBR_READADDR); etb_writel(t, 0, ETBR_TRIGGERCOUNT); etb_lock(t); length -= copy_to_user(data, buf, length); vfree(buf); out: mutex_unlock(&t->mutex); return length; } static int etb_release(struct inode *inode, struct file *file) { /* there's nothing to do here, actually */ return 0; } static const struct file_operations etb_fops = { .owner = THIS_MODULE, .read = etb_read, .open = etb_open, .release = etb_release, }; static struct miscdevice etb_miscdev = { .name = "tracebuf", .minor = 0, .fops = &etb_fops, }; static int __init etb_probe(struct amba_device *dev, struct amba_id *id) { struct tracectx *t = &tracer; int ret = 0; ret = amba_request_regions(dev, NULL); if (ret) goto out; t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res)); if (!t->etb_regs) { ret = -ENOMEM; goto out_release; } amba_set_drvdata(dev, t); etb_miscdev.parent = &dev->dev; ret = misc_register(&etb_miscdev); if (ret) goto out_unmap; t->emu_clk = clk_get(&dev->dev, "emu_src_ck"); if (IS_ERR(t->emu_clk)) { dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n"); return -EFAULT; } clk_enable(t->emu_clk); etb_unlock(t); t->etb_bufsz = etb_readl(t, ETBR_DEPTH); dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz); /* make sure trace capture is disabled */ etb_writel(t, 0, ETBR_CTRL); etb_writel(t, 0x1000, ETBR_FORMATTERCTRL); etb_lock(t); dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n"); out: return ret; out_unmap: amba_set_drvdata(dev, NULL); iounmap(t->etb_regs); out_release: amba_release_regions(dev); return ret; } static int etb_remove(struct amba_device *dev) { struct tracectx *t = amba_get_drvdata(dev); amba_set_drvdata(dev, NULL); iounmap(t->etb_regs); t->etb_regs = NULL; 
clk_disable(t->emu_clk); clk_put(t->emu_clk); amba_release_regions(dev); return 0; } static struct amba_id etb_ids[] = { { .id = 0x0003b907, .mask = 0x0007ffff, }, { 0, 0 }, }; static struct amba_driver etb_driver = { .drv = { .name = "etb", .owner = THIS_MODULE, }, .probe = etb_probe, .remove = etb_remove, .id_table = etb_ids, }; /* use a sysfs file "trace_running" to start/stop tracing */ static ssize_t trace_running_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%x\n", trace_isrunning(&tracer)); } static ssize_t trace_running_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { unsigned int value; int ret; if (sscanf(buf, "%u", &value) != 1) return -EINVAL; mutex_lock(&tracer.mutex); ret = value ? trace_start(&tracer) : trace_stop(&tracer); mutex_unlock(&tracer.mutex); return ret ? : n; } static struct kobj_attribute trace_running_attr = __ATTR(trace_running, 0644, trace_running_show, trace_running_store); static ssize_t trace_info_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st; int datalen; etb_unlock(&tracer); datalen = etb_getdatalen(&tracer); etb_wa = etb_readl(&tracer, ETBR_WRITEADDR); etb_ra = etb_readl(&tracer, ETBR_READADDR); etb_st = etb_readl(&tracer, ETBR_STATUS); etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL); etb_lock(&tracer); etm_unlock(&tracer); etm_ctrl = etm_readl(&tracer, ETMR_CTRL); etm_st = etm_readl(&tracer, ETMR_STATUS); etm_lock(&tracer); return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n" "ETBR_WRITEADDR:\t%08x\n" "ETBR_READADDR:\t%08x\n" "ETBR_STATUS:\t%08x\n" "ETBR_FORMATTERCTRL:\t%08x\n" "ETMR_CTRL:\t%08x\n" "ETMR_STATUS:\t%08x\n", datalen, tracer.ncmppairs, etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st ); } static struct kobj_attribute trace_info_attr = __ATTR(trace_info, 0444, trace_info_show, NULL); static ssize_t trace_mode_show(struct kobject *kobj, struct 
kobj_attribute *attr, char *buf) { return sprintf(buf, "%d %d\n", !!(tracer.flags & TRACER_CYCLE_ACC), tracer.etm_portsz); } static ssize_t trace_mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { unsigned int cycacc, portsz; if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2) return -EINVAL; mutex_lock(&tracer.mutex); if (cycacc) tracer.flags |= TRACER_CYCLE_ACC; else tracer.flags &= ~TRACER_CYCLE_ACC; tracer.etm_portsz = portsz & 0x0f; mutex_unlock(&tracer.mutex); return n; } static struct kobj_attribute trace_mode_attr = __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store); static int __init etm_probe(struct amba_device *dev, struct amba_id *id) { struct tracectx *t = &tracer; int ret = 0; if (t->etm_regs) { dev_dbg(&dev->dev, "ETM already initialized\n"); ret = -EBUSY; goto out; } ret = amba_request_regions(dev, NULL); if (ret) goto out; t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res)); if (!t->etm_regs) { ret = -ENOMEM; goto out_release; } amba_set_drvdata(dev, t); mutex_init(&t->mutex); t->dev = &dev->dev; t->flags = TRACER_CYCLE_ACC; t->etm_portsz = 1; etm_unlock(t); ret = etm_readl(t, CSCR_PRSR); t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf; etm_writel(t, 0x440, ETMR_CTRL); etm_lock(t); ret = sysfs_create_file(&dev->dev.kobj, &trace_running_attr.attr); if (ret) goto out_unmap; /* failing to create any of these two is not fatal */ ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr); if (ret) dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n"); ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr); if (ret) dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n"); dev_dbg(t->dev, "ETM AMBA driver initialized.\n"); out: return ret; out_unmap: amba_set_drvdata(dev, NULL); iounmap(t->etm_regs); out_release: amba_release_regions(dev); return ret; } static int etm_remove(struct amba_device *dev) { struct tracectx *t = amba_get_drvdata(dev); amba_set_drvdata(dev, 
NULL); iounmap(t->etm_regs); t->etm_regs = NULL; amba_release_regions(dev); sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr); sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr); sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr); return 0; } static struct amba_id etm_ids[] = { { .id = 0x0003b921, .mask = 0x0007ffff, }, { 0, 0 }, }; static struct amba_driver etm_driver = { .drv = { .name = "etm", .owner = THIS_MODULE, }, .probe = etm_probe, .remove = etm_remove, .id_table = etm_ids, }; static int __init etm_init(void) { int retval; retval = amba_driver_register(&etb_driver); if (retval) { printk(KERN_ERR "Failed to register etb\n"); return retval; } retval = amba_driver_register(&etm_driver); if (retval) { amba_driver_unregister(&etb_driver); printk(KERN_ERR "Failed to probe etm\n"); return retval; } /* not being able to install this handler is not fatal */ (void)register_sysrq_key('v', &sysrq_etm_op); return 0; } device_initcall(etm_init);
gpl-2.0
vishnuavula/ntb
arch/x86/kernel/alternative.c
674
17018
#define pr_fmt(fmt) "SMP alternatives: " fmt #include <linux/module.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/stringify.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/memory.h> #include <linux/stop_machine.h> #include <linux/slab.h> #include <linux/kdebug.h> #include <asm/alternative.h> #include <asm/sections.h> #include <asm/pgtable.h> #include <asm/mce.h> #include <asm/nmi.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/io.h> #include <asm/fixmap.h> #define MAX_PATCH_LEN (255-1) static int __initdata_or_module debug_alternative; static int __init debug_alt(char *str) { debug_alternative = 1; return 1; } __setup("debug-alternative", debug_alt); static int noreplace_smp; static int __init setup_noreplace_smp(char *str) { noreplace_smp = 1; return 1; } __setup("noreplace-smp", setup_noreplace_smp); #ifdef CONFIG_PARAVIRT static int __initdata_or_module noreplace_paravirt = 0; static int __init setup_noreplace_paravirt(char *str) { noreplace_paravirt = 1; return 1; } __setup("noreplace-paravirt", setup_noreplace_paravirt); #endif #define DPRINTK(fmt, ...) \ do { \ if (debug_alternative) \ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ } while (0) /* * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes * that correspond to that nop. Getting from one nop to the next, we * add to the array the offset that is equal to the sum of all sizes of * nops preceding the one we are after. * * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the * nice symmetry of sizes of the previous nops. 
*/ #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64) static const unsigned char intelnops[] = { GENERIC_NOP1, GENERIC_NOP2, GENERIC_NOP3, GENERIC_NOP4, GENERIC_NOP5, GENERIC_NOP6, GENERIC_NOP7, GENERIC_NOP8, GENERIC_NOP5_ATOMIC }; static const unsigned char * const intel_nops[ASM_NOP_MAX+2] = { NULL, intelnops, intelnops + 1, intelnops + 1 + 2, intelnops + 1 + 2 + 3, intelnops + 1 + 2 + 3 + 4, intelnops + 1 + 2 + 3 + 4 + 5, intelnops + 1 + 2 + 3 + 4 + 5 + 6, intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif #ifdef K8_NOP1 static const unsigned char k8nops[] = { K8_NOP1, K8_NOP2, K8_NOP3, K8_NOP4, K8_NOP5, K8_NOP6, K8_NOP7, K8_NOP8, K8_NOP5_ATOMIC }; static const unsigned char * const k8_nops[ASM_NOP_MAX+2] = { NULL, k8nops, k8nops + 1, k8nops + 1 + 2, k8nops + 1 + 2 + 3, k8nops + 1 + 2 + 3 + 4, k8nops + 1 + 2 + 3 + 4 + 5, k8nops + 1 + 2 + 3 + 4 + 5 + 6, k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif #if defined(K7_NOP1) && !defined(CONFIG_X86_64) static const unsigned char k7nops[] = { K7_NOP1, K7_NOP2, K7_NOP3, K7_NOP4, K7_NOP5, K7_NOP6, K7_NOP7, K7_NOP8, K7_NOP5_ATOMIC }; static const unsigned char * const k7_nops[ASM_NOP_MAX+2] = { NULL, k7nops, k7nops + 1, k7nops + 1 + 2, k7nops + 1 + 2 + 3, k7nops + 1 + 2 + 3 + 4, k7nops + 1 + 2 + 3 + 4 + 5, k7nops + 1 + 2 + 3 + 4 + 5 + 6, k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif #ifdef P6_NOP1 static const unsigned char p6nops[] = { P6_NOP1, P6_NOP2, P6_NOP3, P6_NOP4, P6_NOP5, P6_NOP6, P6_NOP7, P6_NOP8, P6_NOP5_ATOMIC }; static const unsigned char * const p6_nops[ASM_NOP_MAX+2] = { NULL, p6nops, p6nops + 1, p6nops + 1 + 2, p6nops + 1 + 2 + 3, p6nops + 1 + 2 + 3 + 4, p6nops + 1 + 2 + 3 + 4 + 5, p6nops + 1 + 2 + 3 + 4 + 5 + 6, p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, }; #endif /* Initialize these to a safe default */ #ifdef CONFIG_X86_64 const unsigned char * const 
*ideal_nops = p6_nops; #else const unsigned char * const *ideal_nops = intel_nops; #endif void __init arch_init_ideal_nops(void) { switch (boot_cpu_data.x86_vendor) { case X86_VENDOR_INTEL: /* * Due to a decoder implementation quirk, some * specific Intel CPUs actually perform better with * the "k8_nops" than with the SDM-recommended NOPs. */ if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model >= 0x0f && boot_cpu_data.x86_model != 0x1c && boot_cpu_data.x86_model != 0x26 && boot_cpu_data.x86_model != 0x27 && boot_cpu_data.x86_model < 0x30) { ideal_nops = k8_nops; } else if (boot_cpu_has(X86_FEATURE_NOPL)) { ideal_nops = p6_nops; } else { #ifdef CONFIG_X86_64 ideal_nops = k8_nops; #else ideal_nops = intel_nops; #endif } break; default: #ifdef CONFIG_X86_64 ideal_nops = k8_nops; #else if (boot_cpu_has(X86_FEATURE_K8)) ideal_nops = k8_nops; else if (boot_cpu_has(X86_FEATURE_K7)) ideal_nops = k7_nops; else ideal_nops = intel_nops; #endif } } /* Use this to add nops to a buffer, then text_poke the whole buffer. */ static void __init_or_module add_nops(void *insns, unsigned int len) { while (len > 0) { unsigned int noplen = len; if (noplen > ASM_NOP_MAX) noplen = ASM_NOP_MAX; memcpy(insns, ideal_nops[noplen], noplen); insns += noplen; len -= noplen; } } extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern s32 __smp_locks[], __smp_locks_end[]; void *text_poke_early(void *addr, const void *opcode, size_t len); /* Replace instructions with better alternatives for this CPU type. This runs before SMP is initialized to avoid SMP problems with self modifying code. This implies that asymmetric systems where APs have less capabilities than the boot processor are not handled. Tough. Make sure you disable such features by hand. 
*/ void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end) { struct alt_instr *a; u8 *instr, *replacement; u8 insnbuf[MAX_PATCH_LEN]; DPRINTK("%s: alt table %p -> %p\n", __func__, start, end); /* * The scan order should be from start to end. A later scanned * alternative code can overwrite a previous scanned alternative code. * Some kernel functions (e.g. memcpy, memset, etc) use this order to * patch code. * * So be careful if you want to change the scan order to any other * order. */ for (a = start; a < end; a++) { instr = (u8 *)&a->instr_offset + a->instr_offset; replacement = (u8 *)&a->repl_offset + a->repl_offset; BUG_ON(a->replacementlen > a->instrlen); BUG_ON(a->instrlen > sizeof(insnbuf)); BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); if (!boot_cpu_has(a->cpuid)) continue; memcpy(insnbuf, replacement, a->replacementlen); /* 0xe8 is a relative jump; fix the offset. */ if (*insnbuf == 0xe8 && a->replacementlen == 5) *(s32 *)(insnbuf + 1) += replacement - instr; add_nops(insnbuf + a->replacementlen, a->instrlen - a->replacementlen); text_poke_early(instr, insnbuf, a->instrlen); } } #ifdef CONFIG_SMP static void alternatives_smp_lock(const s32 *start, const s32 *end, u8 *text, u8 *text_end) { const s32 *poff; mutex_lock(&text_mutex); for (poff = start; poff < end; poff++) { u8 *ptr = (u8 *)poff + *poff; if (!*poff || ptr < text || ptr >= text_end) continue; /* turn DS segment override prefix into lock prefix */ if (*ptr == 0x3e) text_poke(ptr, ((unsigned char []){0xf0}), 1); } mutex_unlock(&text_mutex); } static void alternatives_smp_unlock(const s32 *start, const s32 *end, u8 *text, u8 *text_end) { const s32 *poff; mutex_lock(&text_mutex); for (poff = start; poff < end; poff++) { u8 *ptr = (u8 *)poff + *poff; if (!*poff || ptr < text || ptr >= text_end) continue; /* turn lock prefix into DS segment override prefix */ if (*ptr == 0xf0) text_poke(ptr, ((unsigned char []){0x3E}), 1); } mutex_unlock(&text_mutex); } struct 
smp_alt_module { /* what is this ??? */ struct module *mod; char *name; /* ptrs to lock prefixes */ const s32 *locks; const s32 *locks_end; /* .text segment, needed to avoid patching init code ;) */ u8 *text; u8 *text_end; struct list_head next; }; static LIST_HEAD(smp_alt_modules); static DEFINE_MUTEX(smp_alt); static bool uniproc_patched = false; /* protected by smp_alt */ void __init_or_module alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end) { struct smp_alt_module *smp; mutex_lock(&smp_alt); if (!uniproc_patched) goto unlock; if (num_possible_cpus() == 1) /* Don't bother remembering, we'll never have to undo it. */ goto smp_unlock; smp = kzalloc(sizeof(*smp), GFP_KERNEL); if (NULL == smp) /* we'll run the (safe but slow) SMP code then ... */ goto unlock; smp->mod = mod; smp->name = name; smp->locks = locks; smp->locks_end = locks_end; smp->text = text; smp->text_end = text_end; DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n", __func__, smp->locks, smp->locks_end, smp->text, smp->text_end, smp->name); list_add_tail(&smp->next, &smp_alt_modules); smp_unlock: alternatives_smp_unlock(locks, locks_end, text, text_end); unlock: mutex_unlock(&smp_alt); } void __init_or_module alternatives_smp_module_del(struct module *mod) { struct smp_alt_module *item; mutex_lock(&smp_alt); list_for_each_entry(item, &smp_alt_modules, next) { if (mod != item->mod) continue; list_del(&item->next); kfree(item); break; } mutex_unlock(&smp_alt); } void alternatives_enable_smp(void) { struct smp_alt_module *mod; /* Why bother if there are no other CPUs? 
*/ BUG_ON(num_possible_cpus() == 1); mutex_lock(&smp_alt); if (uniproc_patched) { pr_info("switching to SMP code\n"); BUG_ON(num_online_cpus() != 1); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP); list_for_each_entry(mod, &smp_alt_modules, next) alternatives_smp_lock(mod->locks, mod->locks_end, mod->text, mod->text_end); uniproc_patched = false; } mutex_unlock(&smp_alt); } /* Return 1 if the address range is reserved for smp-alternatives */ int alternatives_text_reserved(void *start, void *end) { struct smp_alt_module *mod; const s32 *poff; u8 *text_start = start; u8 *text_end = end; list_for_each_entry(mod, &smp_alt_modules, next) { if (mod->text > text_end || mod->text_end < text_start) continue; for (poff = mod->locks; poff < mod->locks_end; poff++) { const u8 *ptr = (const u8 *)poff + *poff; if (text_start <= ptr && text_end > ptr) return 1; } } return 0; } #endif #ifdef CONFIG_PARAVIRT void __init_or_module apply_paravirt(struct paravirt_patch_site *start, struct paravirt_patch_site *end) { struct paravirt_patch_site *p; char insnbuf[MAX_PATCH_LEN]; if (noreplace_paravirt) return; for (p = start; p < end; p++) { unsigned int used; BUG_ON(p->len > MAX_PATCH_LEN); /* prep the buffer with the original instructions */ memcpy(insnbuf, p->instr, p->len); used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, (unsigned long)p->instr, p->len); BUG_ON(used > p->len); /* Pad the rest with nops */ add_nops(insnbuf + used, p->len - used); text_poke_early(p->instr, insnbuf, p->len); } } extern struct paravirt_patch_site __start_parainstructions[], __stop_parainstructions[]; #endif /* CONFIG_PARAVIRT */ void __init alternative_instructions(void) { /* The patching is not fully atomic, so try to avoid local interruptions that might execute the to be patched code. Other CPUs are not running. */ stop_nmi(); /* * Don't stop machine check exceptions while patching. 
* MCEs only happen when something got corrupted and in this * case we must do something about the corruption. * Ignoring it is worse than a unlikely patching race. * Also machine checks tend to be broadcast and if one CPU * goes into machine check the others follow quickly, so we don't * expect a machine check to cause undue problems during to code * patching. */ apply_alternatives(__alt_instructions, __alt_instructions_end); #ifdef CONFIG_SMP /* Patch to UP if other cpus not imminent. */ if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) { uniproc_patched = true; alternatives_smp_module_add(NULL, "core kernel", __smp_locks, __smp_locks_end, _text, _etext); } if (!uniproc_patched || num_possible_cpus() == 1) free_init_pages("SMP alternatives", (unsigned long)__smp_locks, (unsigned long)__smp_locks_end); #endif apply_paravirt(__parainstructions, __parainstructions_end); restart_nmi(); } /** * text_poke_early - Update instructions on a live kernel at boot time * @addr: address to modify * @opcode: source of the copy * @len: length to copy * * When you use this code to patch more than one byte of an instruction * you need to make sure that other CPUs cannot execute this code in parallel. * Also no thread must be currently preempted in the middle of these * instructions. And on the local CPU you need to be protected again NMI or MCE * handlers seeing an inconsistent instruction while you patch. */ void *__init_or_module text_poke_early(void *addr, const void *opcode, size_t len) { unsigned long flags; local_irq_save(flags); memcpy(addr, opcode, len); sync_core(); local_irq_restore(flags); /* Could also do a CLFLUSH here to speed up CPU recovery; but that causes hangs on some VIA CPUs. */ return addr; } /** * text_poke - Update instructions on a live kernel * @addr: address to modify * @opcode: source of the copy * @len: length to copy * * Only atomic text poke/set should be allowed when not doing early patching. 
* It means the size must be writable atomically and the address must be aligned * in a way that permits an atomic write. It also makes sure we fit on a single * page. * * Note: Must be called under text_mutex. */ void *text_poke(void *addr, const void *opcode, size_t len) { unsigned long flags; char *vaddr; struct page *pages[2]; int i; if (!core_kernel_text((unsigned long)addr)) { pages[0] = vmalloc_to_page(addr); pages[1] = vmalloc_to_page(addr + PAGE_SIZE); } else { pages[0] = virt_to_page(addr); WARN_ON(!PageReserved(pages[0])); pages[1] = virt_to_page(addr + PAGE_SIZE); } BUG_ON(!pages[0]); local_irq_save(flags); set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); if (pages[1]) set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0); memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); clear_fixmap(FIX_TEXT_POKE0); if (pages[1]) clear_fixmap(FIX_TEXT_POKE1); local_flush_tlb(); sync_core(); /* Could also do a CLFLUSH here to speed up CPU recovery; but that causes hangs on some VIA CPUs. */ for (i = 0; i < len; i++) BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); local_irq_restore(flags); return addr; } static void do_sync_core(void *info) { sync_core(); } static bool bp_patching_in_progress; static void *bp_int3_handler, *bp_int3_addr; int poke_int3_handler(struct pt_regs *regs) { /* bp_patching_in_progress */ smp_rmb(); if (likely(!bp_patching_in_progress)) return 0; if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr) return 0; /* set up the specified breakpoint handler */ regs->ip = (unsigned long) bp_int3_handler; return 1; } /** * text_poke_bp() -- update instructions on live kernel on SMP * @addr: address to patch * @opcode: opcode of new instruction * @len: length to copy * @handler: address to jump to when the temporary breakpoint is hit * * Modify multi-byte instruction by using int3 breakpoint on SMP. 
* We completely avoid stop_machine() here, and achieve the * synchronization using int3 breakpoint. * * The way it is done: * - add a int3 trap to the address that will be patched * - sync cores * - update all but the first byte of the patched range * - sync cores * - replace the first byte (int3) by the first byte of * replacing opcode * - sync cores * * Note: must be called under text_mutex. */ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) { unsigned char int3 = 0xcc; bp_int3_handler = handler; bp_int3_addr = (u8 *)addr + sizeof(int3); bp_patching_in_progress = true; /* * Corresponding read barrier in int3 notifier for * making sure the in_progress flags is correctly ordered wrt. * patching */ smp_wmb(); text_poke(addr, &int3, sizeof(int3)); on_each_cpu(do_sync_core, NULL, 1); if (len - sizeof(int3) > 0) { /* patch all but the first byte */ text_poke((char *)addr + sizeof(int3), (const char *) opcode + sizeof(int3), len - sizeof(int3)); /* * According to Intel, this core syncing is very likely * not necessary and we'd be safe even without it. But * better safe than sorry (plus there's not only Intel). */ on_each_cpu(do_sync_core, NULL, 1); } /* patch the first byte */ text_poke(addr, opcode, sizeof(int3)); on_each_cpu(do_sync_core, NULL, 1); bp_patching_in_progress = false; smp_wmb(); return addr; }
gpl-2.0
jrfastab/hardware_maps
drivers/soc/tegra/fuse/fuse-tegra.c
674
7689
/* * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * */ #include <linux/clk.h> #include <linux/device.h> #include <linux/kobject.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <soc/tegra/common.h> #include <soc/tegra/fuse.h> #include "fuse.h" struct tegra_sku_info tegra_sku_info; EXPORT_SYMBOL(tegra_sku_info); static const char *tegra_revision_name[TEGRA_REVISION_MAX] = { [TEGRA_REVISION_UNKNOWN] = "unknown", [TEGRA_REVISION_A01] = "A01", [TEGRA_REVISION_A02] = "A02", [TEGRA_REVISION_A03] = "A03", [TEGRA_REVISION_A03p] = "A03 prime", [TEGRA_REVISION_A04] = "A04", }; static u8 fuse_readb(struct tegra_fuse *fuse, unsigned int offset) { u32 val; val = fuse->read(fuse, round_down(offset, 4)); val >>= (offset % 4) * 8; val &= 0xff; return val; } static ssize_t fuse_read(struct file *fd, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t pos, size_t size) { struct device *dev = kobj_to_dev(kobj); struct tegra_fuse *fuse = dev_get_drvdata(dev); int i; if (pos < 0 || pos >= attr->size) return 0; if (size > attr->size - pos) size = attr->size - pos; for (i = 0; i < size; i++) buf[i] = fuse_readb(fuse, pos + i); return i; } static struct bin_attribute fuse_bin_attr = { .attr = { .name = "fuse", .mode = S_IRUGO, }, .read = fuse_read, }; static int 
tegra_fuse_create_sysfs(struct device *dev, unsigned int size, const struct tegra_fuse_info *info) { fuse_bin_attr.size = size; return device_create_bin_file(dev, &fuse_bin_attr); } static const struct of_device_id car_match[] __initconst = { { .compatible = "nvidia,tegra20-car", }, { .compatible = "nvidia,tegra30-car", }, { .compatible = "nvidia,tegra114-car", }, { .compatible = "nvidia,tegra124-car", }, { .compatible = "nvidia,tegra132-car", }, { .compatible = "nvidia,tegra210-car", }, {}, }; static struct tegra_fuse *fuse = &(struct tegra_fuse) { .base = NULL, .soc = NULL, }; static const struct of_device_id tegra_fuse_match[] = { #ifdef CONFIG_ARCH_TEGRA_210_SOC { .compatible = "nvidia,tegra210-efuse", .data = &tegra210_fuse_soc }, #endif #ifdef CONFIG_ARCH_TEGRA_132_SOC { .compatible = "nvidia,tegra132-efuse", .data = &tegra124_fuse_soc }, #endif #ifdef CONFIG_ARCH_TEGRA_124_SOC { .compatible = "nvidia,tegra124-efuse", .data = &tegra124_fuse_soc }, #endif #ifdef CONFIG_ARCH_TEGRA_114_SOC { .compatible = "nvidia,tegra114-efuse", .data = &tegra114_fuse_soc }, #endif #ifdef CONFIG_ARCH_TEGRA_3x_SOC { .compatible = "nvidia,tegra30-efuse", .data = &tegra30_fuse_soc }, #endif #ifdef CONFIG_ARCH_TEGRA_2x_SOC { .compatible = "nvidia,tegra20-efuse", .data = &tegra20_fuse_soc }, #endif { /* sentinel */ } }; static int tegra_fuse_probe(struct platform_device *pdev) { void __iomem *base = fuse->base; struct resource *res; int err; /* take over the memory region from the early initialization */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); fuse->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(fuse->base)) return PTR_ERR(fuse->base); fuse->clk = devm_clk_get(&pdev->dev, "fuse"); if (IS_ERR(fuse->clk)) { dev_err(&pdev->dev, "failed to get FUSE clock: %ld", PTR_ERR(fuse->clk)); return PTR_ERR(fuse->clk); } platform_set_drvdata(pdev, fuse); fuse->dev = &pdev->dev; if (fuse->soc->probe) { err = fuse->soc->probe(fuse); if (err < 0) return err; } if 
(tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size, fuse->soc->info)) return -ENODEV; /* release the early I/O memory mapping */ iounmap(base); return 0; } static struct platform_driver tegra_fuse_driver = { .driver = { .name = "tegra-fuse", .of_match_table = tegra_fuse_match, .suppress_bind_attrs = true, }, .probe = tegra_fuse_probe, }; module_platform_driver(tegra_fuse_driver); bool __init tegra_fuse_read_spare(unsigned int spare) { unsigned int offset = fuse->soc->info->spare + spare * 4; return fuse->read_early(fuse, offset) & 1; } u32 __init tegra_fuse_read_early(unsigned int offset) { return fuse->read_early(fuse, offset); } int tegra_fuse_readl(unsigned long offset, u32 *value) { if (!fuse->read) return -EPROBE_DEFER; *value = fuse->read(fuse, offset); return 0; } EXPORT_SYMBOL(tegra_fuse_readl); static void tegra_enable_fuse_clk(void __iomem *base) { u32 reg; reg = readl_relaxed(base + 0x48); reg |= 1 << 28; writel(reg, base + 0x48); /* * Enable FUSE clock. This needs to be hardcoded because the clock * subsystem is not active during early boot. */ reg = readl(base + 0x14); reg |= 1 << 7; writel(reg, base + 0x14); } static int __init tegra_init_fuse(void) { const struct of_device_id *match; struct device_node *np; struct resource regs; tegra_init_apbmisc(); np = of_find_matching_node_and_match(NULL, tegra_fuse_match, &match); if (!np) { /* * Fall back to legacy initialization for 32-bit ARM only. All * 64-bit ARM device tree files for Tegra are required to have * a FUSE node. * * This is for backwards-compatibility with old device trees * that didn't contain a FUSE node. 
*/ if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) { u8 chip = tegra_get_chip_id(); regs.start = 0x7000f800; regs.end = 0x7000fbff; regs.flags = IORESOURCE_MEM; switch (chip) { #ifdef CONFIG_ARCH_TEGRA_2x_SOC case TEGRA20: fuse->soc = &tegra20_fuse_soc; break; #endif #ifdef CONFIG_ARCH_TEGRA_3x_SOC case TEGRA30: fuse->soc = &tegra30_fuse_soc; break; #endif #ifdef CONFIG_ARCH_TEGRA_114_SOC case TEGRA114: fuse->soc = &tegra114_fuse_soc; break; #endif #ifdef CONFIG_ARCH_TEGRA_124_SOC case TEGRA124: fuse->soc = &tegra124_fuse_soc; break; #endif default: pr_warn("Unsupported SoC: %02x\n", chip); break; } } else { /* * At this point we're not running on Tegra, so play * nice with multi-platform kernels. */ return 0; } } else { /* * Extract information from the device tree if we've found a * matching node. */ if (of_address_to_resource(np, 0, &regs) < 0) { pr_err("failed to get FUSE register\n"); return -ENXIO; } fuse->soc = match->data; } np = of_find_matching_node(NULL, car_match); if (np) { void __iomem *base = of_iomap(np, 0); if (base) { tegra_enable_fuse_clk(base); iounmap(base); } else { pr_err("failed to map clock registers\n"); return -ENXIO; } } fuse->base = ioremap_nocache(regs.start, resource_size(&regs)); if (!fuse->base) { pr_err("failed to map FUSE registers\n"); return -ENXIO; } fuse->soc->init(fuse); pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n", tegra_revision_name[tegra_sku_info.revision], tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id, tegra_sku_info.soc_process_id); pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n", tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id); return 0; } early_initcall(tegra_init_fuse);
gpl-2.0
temasek/lge-kernel-star
drivers/isdn/gigaset/capi.c
1954
69219
/* * Kernel CAPI interface for the Gigaset driver * * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>. * * ===================================================================== * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * ===================================================================== */ #include "gigaset.h" #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/isdn/capilli.h> #include <linux/isdn/capicmd.h> #include <linux/isdn/capiutil.h> /* missing from kernelcapi.h */ #define CapiNcpiNotSupportedByProtocol 0x0001 #define CapiFlagsNotSupportedByProtocol 0x0002 #define CapiAlertAlreadySent 0x0003 #define CapiFacilitySpecificFunctionNotSupported 0x3011 /* missing from capicmd.h */ #define CAPI_CONNECT_IND_BASELEN (CAPI_MSG_BASELEN+4+2+8*1) #define CAPI_CONNECT_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN+4+3*1) #define CAPI_CONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN+4+1) #define CAPI_CONNECT_B3_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN+4+1) #define CAPI_DATA_B3_REQ_LEN64 (CAPI_MSG_BASELEN+4+4+2+2+2+8) #define CAPI_DATA_B3_CONF_LEN (CAPI_MSG_BASELEN+4+2+2) #define CAPI_DISCONNECT_IND_LEN (CAPI_MSG_BASELEN+4+2) #define CAPI_DISCONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN+4+2+1) #define CAPI_FACILITY_CONF_BASELEN (CAPI_MSG_BASELEN+4+2+2+1) /* most _CONF messages contain only Controller/PLCI/NCCI and Info parameters */ #define CAPI_STDCONF_LEN (CAPI_MSG_BASELEN+4+2) #define CAPI_FACILITY_HANDSET 0x0000 #define CAPI_FACILITY_DTMF 0x0001 #define CAPI_FACILITY_V42BIS 0x0002 #define CAPI_FACILITY_SUPPSVC 0x0003 #define CAPI_FACILITY_WAKEUP 0x0004 #define CAPI_FACILITY_LI 0x0005 #define CAPI_SUPPSVC_GETSUPPORTED 0x0000 #define CAPI_SUPPSVC_LISTEN 0x0001 /* missing from capiutil.h */ #define CAPIMSG_PLCI_PART(m) CAPIMSG_U8(m, 9) #define CAPIMSG_NCCI_PART(m) 
CAPIMSG_U16(m, 10) #define CAPIMSG_HANDLE_REQ(m) CAPIMSG_U16(m, 18) /* DATA_B3_REQ/_IND only! */ #define CAPIMSG_FLAGS(m) CAPIMSG_U16(m, 20) #define CAPIMSG_SETCONTROLLER(m, contr) capimsg_setu8(m, 8, contr) #define CAPIMSG_SETPLCI_PART(m, plci) capimsg_setu8(m, 9, plci) #define CAPIMSG_SETNCCI_PART(m, ncci) capimsg_setu16(m, 10, ncci) #define CAPIMSG_SETFLAGS(m, flags) capimsg_setu16(m, 20, flags) /* parameters with differing location in DATA_B3_CONF/_RESP: */ #define CAPIMSG_SETHANDLE_CONF(m, handle) capimsg_setu16(m, 12, handle) #define CAPIMSG_SETINFO_CONF(m, info) capimsg_setu16(m, 14, info) /* Flags (DATA_B3_REQ/_IND) */ #define CAPI_FLAGS_DELIVERY_CONFIRMATION 0x04 #define CAPI_FLAGS_RESERVED (~0x1f) /* buffer sizes */ #define MAX_BC_OCTETS 11 #define MAX_HLC_OCTETS 3 #define MAX_NUMBER_DIGITS 20 #define MAX_FMT_IE_LEN 20 /* values for bcs->apconnstate */ #define APCONN_NONE 0 /* inactive/listening */ #define APCONN_SETUP 1 /* connecting */ #define APCONN_ACTIVE 2 /* B channel up */ /* registered application data structure */ struct gigaset_capi_appl { struct list_head ctrlist; struct gigaset_capi_appl *bcnext; u16 id; struct capi_register_params rp; u16 nextMessageNumber; u32 listenInfoMask; u32 listenCIPmask; }; /* CAPI specific controller data structure */ struct gigaset_capi_ctr { struct capi_ctr ctr; struct list_head appls; struct sk_buff_head sendqueue; atomic_t sendqlen; /* two _cmsg structures possibly used concurrently: */ _cmsg hcmsg; /* for message composition triggered from hardware */ _cmsg acmsg; /* for dissection of messages sent from application */ u8 bc_buf[MAX_BC_OCTETS+1]; u8 hlc_buf[MAX_HLC_OCTETS+1]; u8 cgpty_buf[MAX_NUMBER_DIGITS+3]; u8 cdpty_buf[MAX_NUMBER_DIGITS+2]; }; /* CIP Value table (from CAPI 2.0 standard, ch. 
6.1) */ static struct { u8 *bc; u8 *hlc; } cip2bchlc[] = { [1] = { "8090A3", NULL }, /* Speech (A-law) */ [2] = { "8890", NULL }, /* Unrestricted digital information */ [3] = { "8990", NULL }, /* Restricted digital information */ [4] = { "9090A3", NULL }, /* 3,1 kHz audio (A-law) */ [5] = { "9190", NULL }, /* 7 kHz audio */ [6] = { "9890", NULL }, /* Video */ [7] = { "88C0C6E6", NULL }, /* Packet mode */ [8] = { "8890218F", NULL }, /* 56 kbit/s rate adaptation */ [9] = { "9190A5", NULL }, /* Unrestricted digital information with tones/announcements */ [16] = { "8090A3", "9181" }, /* Telephony */ [17] = { "9090A3", "9184" }, /* Group 2/3 facsimile */ [18] = { "8890", "91A1" }, /* Group 4 facsimile Class 1 */ [19] = { "8890", "91A4" }, /* Teletex service basic and mixed mode and Group 4 facsimile service Classes II and III */ [20] = { "8890", "91A8" }, /* Teletex service basic and processable mode */ [21] = { "8890", "91B1" }, /* Teletex service basic mode */ [22] = { "8890", "91B2" }, /* International interworking for Videotex */ [23] = { "8890", "91B5" }, /* Telex */ [24] = { "8890", "91B8" }, /* Message Handling Systems in accordance with X.400 */ [25] = { "8890", "91C1" }, /* OSI application in accordance with X.200 */ [26] = { "9190A5", "9181" }, /* 7 kHz telephony */ [27] = { "9190A5", "916001" }, /* Video telephony, first connection */ [28] = { "8890", "916002" }, /* Video telephony, second connection */ }; /* * helper functions * ================ */ /* * emit unsupported parameter warning */ static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param, char *msgname, char *paramname) { if (param && *param) dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n", msgname, paramname); } /* * convert an IE from Gigaset hex string to ETSI binary representation * including length byte * return value: result length, -1 on error */ static int encode_ie(char *in, u8 *out, int maxlen) { int l = 0; while (*in) { if (!isxdigit(in[0]) || 
!isxdigit(in[1]) || l >= maxlen) return -1; out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]); in += 2; } out[0] = l; return l; } /* * convert an IE from ETSI binary representation including length byte * to Gigaset hex string */ static void decode_ie(u8 *in, char *out) { int i = *in; while (i-- > 0) { /* ToDo: conversion to upper case necessary? */ *out++ = toupper(hex_asc_hi(*++in)); *out++ = toupper(hex_asc_lo(*in)); } } /* * retrieve application data structure for an application ID */ static inline struct gigaset_capi_appl * get_appl(struct gigaset_capi_ctr *iif, u16 appl) { struct gigaset_capi_appl *ap; list_for_each_entry(ap, &iif->appls, ctrlist) if (ap->id == appl) return ap; return NULL; } /* * dump CAPI message to kernel messages for debugging */ static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p) { #ifdef CONFIG_GIGASET_DEBUG _cdebbuf *cdb; if (!(gigaset_debuglevel & level)) return; cdb = capi_cmsg2str(p); if (cdb) { gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, cdb->buf); cdebbuf_free(cdb); } else { gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, capi_cmd2str(p->Command, p->Subcommand)); } #endif } static inline void dump_rawmsg(enum debuglevel level, const char *tag, unsigned char *data) { #ifdef CONFIG_GIGASET_DEBUG char *dbgline; int i, l; if (!(gigaset_debuglevel & level)) return; l = CAPIMSG_LEN(data); if (l < 12) { gig_dbg(level, "%s: ??? 
LEN=%04d", tag, l); return; } gig_dbg(level, "%s: 0x%02x:0x%02x: ID=%03d #0x%04x LEN=%04d NCCI=0x%x", tag, CAPIMSG_COMMAND(data), CAPIMSG_SUBCOMMAND(data), CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l, CAPIMSG_CONTROL(data)); l -= 12; dbgline = kmalloc(3*l, GFP_ATOMIC); if (!dbgline) return; for (i = 0; i < l; i++) { dbgline[3*i] = hex_asc_hi(data[12+i]); dbgline[3*i+1] = hex_asc_lo(data[12+i]); dbgline[3*i+2] = ' '; } dbgline[3*l-1] = '\0'; gig_dbg(level, " %s", dbgline); kfree(dbgline); if (CAPIMSG_COMMAND(data) == CAPI_DATA_B3 && (CAPIMSG_SUBCOMMAND(data) == CAPI_REQ || CAPIMSG_SUBCOMMAND(data) == CAPI_IND)) { l = CAPIMSG_DATALEN(data); gig_dbg(level, " DataLength=%d", l); if (l <= 0 || !(gigaset_debuglevel & DEBUG_LLDATA)) return; if (l > 64) l = 64; /* arbitrary limit */ dbgline = kmalloc(3*l, GFP_ATOMIC); if (!dbgline) return; data += CAPIMSG_LEN(data); for (i = 0; i < l; i++) { dbgline[3*i] = hex_asc_hi(data[i]); dbgline[3*i+1] = hex_asc_lo(data[i]); dbgline[3*i+2] = ' '; } dbgline[3*l-1] = '\0'; gig_dbg(level, " %s", dbgline); kfree(dbgline); } #endif } /* * format CAPI IE as string */ static const char *format_ie(const char *ie) { static char result[3*MAX_FMT_IE_LEN]; int len, count; char *pout = result; if (!ie) return "NULL"; count = len = ie[0]; if (count > MAX_FMT_IE_LEN) count = MAX_FMT_IE_LEN-1; while (count--) { *pout++ = hex_asc_hi(*++ie); *pout++ = hex_asc_lo(*ie); *pout++ = ' '; } if (len > MAX_FMT_IE_LEN) { *pout++ = '.'; *pout++ = '.'; *pout++ = '.'; } *--pout = 0; return result; } /* * emit DATA_B3_CONF message */ static void send_data_b3_conf(struct cardstate *cs, struct capi_ctr *ctr, u16 appl, u16 msgid, int channel, u16 handle, u16 info) { struct sk_buff *cskb; u8 *msg; cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC); if (!cskb) { dev_err(cs->dev, "%s: out of memory\n", __func__); return; } /* frequent message, avoid _cmsg overhead */ msg = __skb_put(cskb, CAPI_DATA_B3_CONF_LEN); CAPIMSG_SETLEN(msg, CAPI_DATA_B3_CONF_LEN); 
CAPIMSG_SETAPPID(msg, appl); CAPIMSG_SETCOMMAND(msg, CAPI_DATA_B3); CAPIMSG_SETSUBCOMMAND(msg, CAPI_CONF); CAPIMSG_SETMSGID(msg, msgid); CAPIMSG_SETCONTROLLER(msg, ctr->cnr); CAPIMSG_SETPLCI_PART(msg, channel); CAPIMSG_SETNCCI_PART(msg, 1); CAPIMSG_SETHANDLE_CONF(msg, handle); CAPIMSG_SETINFO_CONF(msg, info); /* emit message */ dump_rawmsg(DEBUG_MCMD, __func__, msg); capi_ctr_handle_message(ctr, appl, cskb); } /* * driver interface functions * ========================== */ /** * gigaset_skb_sent() - acknowledge transmission of outgoing skb * @bcs: B channel descriptor structure. * @skb: sent data. * * Called by hardware module {bas,ser,usb}_gigaset when the data in a * skb has been successfully sent, for signalling completion to the LL. */ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb) { struct cardstate *cs = bcs->cs; struct gigaset_capi_ctr *iif = cs->iif; struct gigaset_capi_appl *ap = bcs->ap; unsigned char *req = skb_mac_header(dskb); u16 flags; /* update statistics */ ++bcs->trans_up; if (!ap) { gig_dbg(DEBUG_MCMD, "%s: application gone", __func__); return; } /* don't send further B3 messages if disconnected */ if (bcs->apconnstate < APCONN_ACTIVE) { gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__); return; } /* * send DATA_B3_CONF if "delivery confirmation" bit was set in request; * otherwise it has already been sent by do_data_b3_req() */ flags = CAPIMSG_FLAGS(req); if (flags & CAPI_FLAGS_DELIVERY_CONFIRMATION) send_data_b3_conf(cs, &iif->ctr, ap->id, CAPIMSG_MSGID(req), bcs->channel + 1, CAPIMSG_HANDLE_REQ(req), (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION) ? CapiFlagsNotSupportedByProtocol : CAPI_NOERROR); } EXPORT_SYMBOL_GPL(gigaset_skb_sent); /** * gigaset_skb_rcvd() - pass received skb to LL * @bcs: B channel descriptor structure. * @skb: received data. * * Called by hardware module {bas,ser,usb}_gigaset when user data has * been successfully received, for passing to the LL. * Warning: skb must not be accessed anymore! 
*/ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb) { struct cardstate *cs = bcs->cs; struct gigaset_capi_ctr *iif = cs->iif; struct gigaset_capi_appl *ap = bcs->ap; int len = skb->len; /* update statistics */ bcs->trans_down++; if (!ap) { gig_dbg(DEBUG_MCMD, "%s: application gone", __func__); dev_kfree_skb_any(skb); return; } /* don't send further B3 messages if disconnected */ if (bcs->apconnstate < APCONN_ACTIVE) { gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__); dev_kfree_skb_any(skb); return; } /* * prepend DATA_B3_IND message to payload * Parameters: NCCI = 1, all others 0/unused * frequent message, avoid _cmsg overhead */ skb_push(skb, CAPI_DATA_B3_REQ_LEN); CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN); CAPIMSG_SETAPPID(skb->data, ap->id); CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3); CAPIMSG_SETSUBCOMMAND(skb->data, CAPI_IND); CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++); CAPIMSG_SETCONTROLLER(skb->data, iif->ctr.cnr); CAPIMSG_SETPLCI_PART(skb->data, bcs->channel + 1); CAPIMSG_SETNCCI_PART(skb->data, 1); /* Data parameter not used */ CAPIMSG_SETDATALEN(skb->data, len); /* Data handle parameter not used */ CAPIMSG_SETFLAGS(skb->data, 0); /* Data64 parameter not present */ /* emit message */ dump_rawmsg(DEBUG_MCMD, __func__, skb->data); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } EXPORT_SYMBOL_GPL(gigaset_skb_rcvd); /** * gigaset_isdn_rcv_err() - signal receive error * @bcs: B channel descriptor structure. * * Called by hardware module {bas,ser,usb}_gigaset when a receive error * has occurred, for signalling to the LL. */ void gigaset_isdn_rcv_err(struct bc_state *bcs) { /* if currently ignoring packets, just count down */ if (bcs->ignore) { bcs->ignore--; return; } /* update statistics */ bcs->corrupted++; /* ToDo: signal error -> LL */ } EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err); /** * gigaset_isdn_icall() - signal incoming call * @at_state: connection state structure. 
* * Called by main module at tasklet level to notify the LL that an incoming * call has been received. @at_state contains the parameters of the call. * * Return value: call disposition (ICALL_*) */ int gigaset_isdn_icall(struct at_state_t *at_state) { struct cardstate *cs = at_state->cs; struct bc_state *bcs = at_state->bcs; struct gigaset_capi_ctr *iif = cs->iif; struct gigaset_capi_appl *ap; u32 actCIPmask; struct sk_buff *skb; unsigned int msgsize; unsigned long flags; int i; /* * ToDo: signal calls without a free B channel, too * (requires a u8 handle for the at_state structure that can * be stored in the PLCI and used in the CONNECT_RESP message * handler to retrieve it) */ if (!bcs) return ICALL_IGNORE; /* prepare CONNECT_IND message, using B channel number as PLCI */ capi_cmsg_header(&iif->hcmsg, 0, CAPI_CONNECT, CAPI_IND, 0, iif->ctr.cnr | ((bcs->channel + 1) << 8)); /* minimum size, all structs empty */ msgsize = CAPI_CONNECT_IND_BASELEN; /* Bearer Capability (mandatory) */ if (at_state->str_var[STR_ZBC]) { /* pass on BC from Gigaset */ if (encode_ie(at_state->str_var[STR_ZBC], iif->bc_buf, MAX_BC_OCTETS) < 0) { dev_warn(cs->dev, "RING ignored - bad BC %s\n", at_state->str_var[STR_ZBC]); return ICALL_IGNORE; } /* look up corresponding CIP value */ iif->hcmsg.CIPValue = 0; /* default if nothing found */ for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++) if (cip2bchlc[i].bc != NULL && cip2bchlc[i].hlc == NULL && !strcmp(cip2bchlc[i].bc, at_state->str_var[STR_ZBC])) { iif->hcmsg.CIPValue = i; break; } } else { /* no BC (internal call): assume CIP 1 (speech, A-law) */ iif->hcmsg.CIPValue = 1; encode_ie(cip2bchlc[1].bc, iif->bc_buf, MAX_BC_OCTETS); } iif->hcmsg.BC = iif->bc_buf; msgsize += iif->hcmsg.BC[0]; /* High Layer Compatibility (optional) */ if (at_state->str_var[STR_ZHLC]) { /* pass on HLC from Gigaset */ if (encode_ie(at_state->str_var[STR_ZHLC], iif->hlc_buf, MAX_HLC_OCTETS) < 0) { dev_warn(cs->dev, "RING ignored - bad HLC %s\n", 
at_state->str_var[STR_ZHLC]); return ICALL_IGNORE; } iif->hcmsg.HLC = iif->hlc_buf; msgsize += iif->hcmsg.HLC[0]; /* look up corresponding CIP value */ /* keep BC based CIP value if none found */ if (at_state->str_var[STR_ZBC]) for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++) if (cip2bchlc[i].hlc != NULL && !strcmp(cip2bchlc[i].hlc, at_state->str_var[STR_ZHLC]) && !strcmp(cip2bchlc[i].bc, at_state->str_var[STR_ZBC])) { iif->hcmsg.CIPValue = i; break; } } /* Called Party Number (optional) */ if (at_state->str_var[STR_ZCPN]) { i = strlen(at_state->str_var[STR_ZCPN]); if (i > MAX_NUMBER_DIGITS) { dev_warn(cs->dev, "RING ignored - bad number %s\n", at_state->str_var[STR_ZBC]); return ICALL_IGNORE; } iif->cdpty_buf[0] = i + 1; iif->cdpty_buf[1] = 0x80; /* type / numbering plan unknown */ memcpy(iif->cdpty_buf+2, at_state->str_var[STR_ZCPN], i); iif->hcmsg.CalledPartyNumber = iif->cdpty_buf; msgsize += iif->hcmsg.CalledPartyNumber[0]; } /* Calling Party Number (optional) */ if (at_state->str_var[STR_NMBR]) { i = strlen(at_state->str_var[STR_NMBR]); if (i > MAX_NUMBER_DIGITS) { dev_warn(cs->dev, "RING ignored - bad number %s\n", at_state->str_var[STR_ZBC]); return ICALL_IGNORE; } iif->cgpty_buf[0] = i + 2; iif->cgpty_buf[1] = 0x00; /* type / numbering plan unknown */ iif->cgpty_buf[2] = 0x80; /* pres. 
allowed, not screened */ memcpy(iif->cgpty_buf+3, at_state->str_var[STR_NMBR], i); iif->hcmsg.CallingPartyNumber = iif->cgpty_buf; msgsize += iif->hcmsg.CallingPartyNumber[0]; } /* remaining parameters (not supported, always left NULL): * - CalledPartySubaddress * - CallingPartySubaddress * - AdditionalInfo * - BChannelinformation * - Keypadfacility * - Useruserdata * - Facilitydataarray */ gig_dbg(DEBUG_CMD, "icall: PLCI %x CIP %d BC %s", iif->hcmsg.adr.adrPLCI, iif->hcmsg.CIPValue, format_ie(iif->hcmsg.BC)); gig_dbg(DEBUG_CMD, "icall: HLC %s", format_ie(iif->hcmsg.HLC)); gig_dbg(DEBUG_CMD, "icall: CgPty %s", format_ie(iif->hcmsg.CallingPartyNumber)); gig_dbg(DEBUG_CMD, "icall: CdPty %s", format_ie(iif->hcmsg.CalledPartyNumber)); /* scan application list for matching listeners */ spin_lock_irqsave(&bcs->aplock, flags); if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) { dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n", __func__, bcs->ap, bcs->apconnstate); bcs->ap = NULL; bcs->apconnstate = APCONN_NONE; } spin_unlock_irqrestore(&bcs->aplock, flags); actCIPmask = 1 | (1 << iif->hcmsg.CIPValue); list_for_each_entry(ap, &iif->appls, ctrlist) if (actCIPmask & ap->listenCIPmask) { /* build CONNECT_IND message for this application */ iif->hcmsg.ApplId = ap->id; iif->hcmsg.Messagenumber = ap->nextMessageNumber++; skb = alloc_skb(msgsize, GFP_ATOMIC); if (!skb) { dev_err(cs->dev, "%s: out of memory\n", __func__); break; } capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize)); dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); /* add to listeners on this B channel, update state */ spin_lock_irqsave(&bcs->aplock, flags); ap->bcnext = bcs->ap; bcs->ap = ap; bcs->chstate |= CHS_NOTIFY_LL; bcs->apconnstate = APCONN_SETUP; spin_unlock_irqrestore(&bcs->aplock, flags); /* emit message */ capi_ctr_handle_message(&iif->ctr, ap->id, skb); } /* * Return "accept" if any listeners. * Gigaset will send ALERTING. * There doesn't seem to be a way to avoid this. 
*/ return bcs->ap ? ICALL_ACCEPT : ICALL_IGNORE; } /* * send a DISCONNECT_IND message to an application * does not sleep, clobbers the controller's hcmsg structure */ static void send_disconnect_ind(struct bc_state *bcs, struct gigaset_capi_appl *ap, u16 reason) { struct cardstate *cs = bcs->cs; struct gigaset_capi_ctr *iif = cs->iif; struct sk_buff *skb; if (bcs->apconnstate == APCONN_NONE) return; capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND, ap->nextMessageNumber++, iif->ctr.cnr | ((bcs->channel + 1) << 8)); iif->hcmsg.Reason = reason; skb = alloc_skb(CAPI_DISCONNECT_IND_LEN, GFP_ATOMIC); if (!skb) { dev_err(cs->dev, "%s: out of memory\n", __func__); return; } capi_cmsg2message(&iif->hcmsg, __skb_put(skb, CAPI_DISCONNECT_IND_LEN)); dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } /* * send a DISCONNECT_B3_IND message to an application * Parameters: NCCI = 1, NCPI empty, Reason_B3 = 0 * does not sleep, clobbers the controller's hcmsg structure */ static void send_disconnect_b3_ind(struct bc_state *bcs, struct gigaset_capi_appl *ap) { struct cardstate *cs = bcs->cs; struct gigaset_capi_ctr *iif = cs->iif; struct sk_buff *skb; /* nothing to do if no logical connection active */ if (bcs->apconnstate < APCONN_ACTIVE) return; bcs->apconnstate = APCONN_SETUP; capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND, ap->nextMessageNumber++, iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16)); skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_ATOMIC); if (!skb) { dev_err(cs->dev, "%s: out of memory\n", __func__); return; } capi_cmsg2message(&iif->hcmsg, __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN)); dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } /** * gigaset_isdn_connD() - signal D channel connect * @bcs: B channel descriptor structure. 
* * Called by main module at tasklet level to notify the LL that the D channel * connection has been established. */ void gigaset_isdn_connD(struct bc_state *bcs) { struct cardstate *cs = bcs->cs; struct gigaset_capi_ctr *iif = cs->iif; struct gigaset_capi_appl *ap; struct sk_buff *skb; unsigned int msgsize; unsigned long flags; spin_lock_irqsave(&bcs->aplock, flags); ap = bcs->ap; if (!ap) { spin_unlock_irqrestore(&bcs->aplock, flags); gig_dbg(DEBUG_CMD, "%s: application gone", __func__); return; } if (bcs->apconnstate == APCONN_NONE) { spin_unlock_irqrestore(&bcs->aplock, flags); dev_warn(cs->dev, "%s: application %u not connected\n", __func__, ap->id); return; } spin_unlock_irqrestore(&bcs->aplock, flags); while (ap->bcnext) { /* this should never happen */ dev_warn(cs->dev, "%s: dropping extra application %u\n", __func__, ap->bcnext->id); send_disconnect_ind(bcs, ap->bcnext, CapiCallGivenToOtherApplication); ap->bcnext = ap->bcnext->bcnext; } /* prepare CONNECT_ACTIVE_IND message * Note: LLC not supported by device */ capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_CONNECT_ACTIVE, CAPI_IND, ap->nextMessageNumber++, iif->ctr.cnr | ((bcs->channel + 1) << 8)); /* minimum size, all structs empty */ msgsize = CAPI_CONNECT_ACTIVE_IND_BASELEN; /* ToDo: set parameter: Connected number * (requires ev-layer state machine extension to collect * ZCON device reply) */ /* build and emit CONNECT_ACTIVE_IND message */ skb = alloc_skb(msgsize, GFP_ATOMIC); if (!skb) { dev_err(cs->dev, "%s: out of memory\n", __func__); return; } capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize)); dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } /** * gigaset_isdn_hupD() - signal D channel hangup * @bcs: B channel descriptor structure. * * Called by main module at tasklet level to notify the LL that the D channel * connection has been shut down. 
*/ void gigaset_isdn_hupD(struct bc_state *bcs) { struct gigaset_capi_appl *ap; unsigned long flags; /* * ToDo: pass on reason code reported by device * (requires ev-layer state machine extension to collect * ZCAU device reply) */ spin_lock_irqsave(&bcs->aplock, flags); while (bcs->ap != NULL) { ap = bcs->ap; bcs->ap = ap->bcnext; spin_unlock_irqrestore(&bcs->aplock, flags); send_disconnect_b3_ind(bcs, ap); send_disconnect_ind(bcs, ap, 0); spin_lock_irqsave(&bcs->aplock, flags); } bcs->apconnstate = APCONN_NONE; spin_unlock_irqrestore(&bcs->aplock, flags); } /** * gigaset_isdn_connB() - signal B channel connect * @bcs: B channel descriptor structure. * * Called by main module at tasklet level to notify the LL that the B channel * connection has been established. */ void gigaset_isdn_connB(struct bc_state *bcs) { struct cardstate *cs = bcs->cs; struct gigaset_capi_ctr *iif = cs->iif; struct gigaset_capi_appl *ap; struct sk_buff *skb; unsigned long flags; unsigned int msgsize; u8 command; spin_lock_irqsave(&bcs->aplock, flags); ap = bcs->ap; if (!ap) { spin_unlock_irqrestore(&bcs->aplock, flags); gig_dbg(DEBUG_CMD, "%s: application gone", __func__); return; } if (!bcs->apconnstate) { spin_unlock_irqrestore(&bcs->aplock, flags); dev_warn(cs->dev, "%s: application %u not connected\n", __func__, ap->id); return; } /* * emit CONNECT_B3_ACTIVE_IND if we already got CONNECT_B3_REQ; * otherwise we have to emit CONNECT_B3_IND first, and follow up with * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP * Parameters in both cases always: NCCI = 1, NCPI empty */ if (bcs->apconnstate >= APCONN_ACTIVE) { command = CAPI_CONNECT_B3_ACTIVE; msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN; } else { command = CAPI_CONNECT_B3; msgsize = CAPI_CONNECT_B3_IND_BASELEN; } bcs->apconnstate = APCONN_ACTIVE; spin_unlock_irqrestore(&bcs->aplock, flags); while (ap->bcnext) { /* this should never happen */ dev_warn(cs->dev, "%s: dropping extra application %u\n", __func__, ap->bcnext->id); 
send_disconnect_ind(bcs, ap->bcnext, CapiCallGivenToOtherApplication); ap->bcnext = ap->bcnext->bcnext; } capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND, ap->nextMessageNumber++, iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16)); skb = alloc_skb(msgsize, GFP_ATOMIC); if (!skb) { dev_err(cs->dev, "%s: out of memory\n", __func__); return; } capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize)); dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } /** * gigaset_isdn_hupB() - signal B channel hangup * @bcs: B channel descriptor structure. * * Called by main module to notify the LL that the B channel connection has * been shut down. */ void gigaset_isdn_hupB(struct bc_state *bcs) { struct gigaset_capi_appl *ap = bcs->ap; /* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */ if (!ap) { gig_dbg(DEBUG_CMD, "%s: application gone", __func__); return; } send_disconnect_b3_ind(bcs, ap); } /** * gigaset_isdn_start() - signal device availability * @cs: device descriptor structure. * * Called by main module to notify the LL that the device is available for * use. */ void gigaset_isdn_start(struct cardstate *cs) { struct gigaset_capi_ctr *iif = cs->iif; /* fill profile data: manufacturer name */ strcpy(iif->ctr.manu, "Siemens"); /* CAPI and device version */ iif->ctr.version.majorversion = 2; /* CAPI 2.0 */ iif->ctr.version.minorversion = 0; /* ToDo: check/assert cs->gotfwver? */ iif->ctr.version.majormanuversion = cs->fwver[0]; iif->ctr.version.minormanuversion = cs->fwver[1]; /* number of B channels supported */ iif->ctr.profile.nbchannel = cs->channels; /* global options: internal controller, supplementary services */ iif->ctr.profile.goptions = 0x11; /* B1 protocols: 64 kbit/s HDLC or transparent */ iif->ctr.profile.support1 = 0x03; /* B2 protocols: transparent only */ /* ToDo: X.75 SLP ? 
*/ iif->ctr.profile.support2 = 0x02; /* B3 protocols: transparent only */ iif->ctr.profile.support3 = 0x01; /* no serial number */ strcpy(iif->ctr.serial, "0"); capi_ctr_ready(&iif->ctr); } /** * gigaset_isdn_stop() - signal device unavailability * @cs: device descriptor structure. * * Called by main module to notify the LL that the device is no longer * available for use. */ void gigaset_isdn_stop(struct cardstate *cs) { struct gigaset_capi_ctr *iif = cs->iif; capi_ctr_down(&iif->ctr); } /* * kernel CAPI callback methods * ============================ */ /* * register CAPI application */ static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl, capi_register_params *rp) { struct gigaset_capi_ctr *iif = container_of(ctr, struct gigaset_capi_ctr, ctr); struct cardstate *cs = ctr->driverdata; struct gigaset_capi_appl *ap; gig_dbg(DEBUG_CMD, "%s [%u] l3cnt=%u blkcnt=%u blklen=%u", __func__, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen); list_for_each_entry(ap, &iif->appls, ctrlist) if (ap->id == appl) { dev_notice(cs->dev, "application %u already registered\n", appl); return; } ap = kzalloc(sizeof(*ap), GFP_KERNEL); if (!ap) { dev_err(cs->dev, "%s: out of memory\n", __func__); return; } ap->id = appl; ap->rp = *rp; list_add(&ap->ctrlist, &iif->appls); dev_info(cs->dev, "application %u registered\n", ap->id); } /* * remove CAPI application from channel * helper function to keep indentation levels down and stay in 80 columns */ static inline void remove_appl_from_channel(struct bc_state *bcs, struct gigaset_capi_appl *ap) { struct cardstate *cs = bcs->cs; struct gigaset_capi_appl *bcap; unsigned long flags; int prevconnstate; spin_lock_irqsave(&bcs->aplock, flags); bcap = bcs->ap; if (bcap == NULL) { spin_unlock_irqrestore(&bcs->aplock, flags); return; } /* check first application on channel */ if (bcap == ap) { bcs->ap = ap->bcnext; if (bcs->ap != NULL) { spin_unlock_irqrestore(&bcs->aplock, flags); return; } /* none left, clear channel state */ 
prevconnstate = bcs->apconnstate; bcs->apconnstate = APCONN_NONE; spin_unlock_irqrestore(&bcs->aplock, flags); if (prevconnstate == APCONN_ACTIVE) { dev_notice(cs->dev, "%s: hanging up channel %u\n", __func__, bcs->channel); gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL); gigaset_schedule_event(cs); } return; } /* check remaining list */ do { if (bcap->bcnext == ap) { bcap->bcnext = bcap->bcnext->bcnext; spin_unlock_irqrestore(&bcs->aplock, flags); return; } bcap = bcap->bcnext; } while (bcap != NULL); spin_unlock_irqrestore(&bcs->aplock, flags); } /* * release CAPI application */ static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl) { struct gigaset_capi_ctr *iif = container_of(ctr, struct gigaset_capi_ctr, ctr); struct cardstate *cs = iif->ctr.driverdata; struct gigaset_capi_appl *ap, *tmp; unsigned ch; gig_dbg(DEBUG_CMD, "%s [%u]", __func__, appl); list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist) if (ap->id == appl) { /* remove from any channels */ for (ch = 0; ch < cs->channels; ch++) remove_appl_from_channel(&cs->bcs[ch], ap); /* remove from registration list */ list_del(&ap->ctrlist); kfree(ap); dev_info(cs->dev, "application %u released\n", appl); } } /* * ===================================================================== * outgoing CAPI message handler * ===================================================================== */ /* * helper function: emit reply message with given Info value */ static void send_conf(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb, u16 info) { /* * _CONF replies always only have NCCI and Info parameters * so they'll fit into the _REQ message skb */ capi_cmsg_answer(&iif->acmsg); iif->acmsg.Info = info; capi_cmsg2message(&iif->acmsg, skb->data); __skb_trim(skb, CAPI_STDCONF_LEN); dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } /* * process FACILITY_REQ message */ static void do_facility_req(struct gigaset_capi_ctr 
*iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { struct cardstate *cs = iif->ctr.driverdata; _cmsg *cmsg = &iif->acmsg; struct sk_buff *cskb; u8 *pparam; unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN; u16 function, info; static u8 confparam[10]; /* max. 9 octets + length byte */ /* decode message */ capi_message2cmsg(cmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, cmsg); /* * Facility Request Parameter is not decoded by capi_message2cmsg() * encoding depends on Facility Selector */ switch (cmsg->FacilitySelector) { case CAPI_FACILITY_DTMF: /* ToDo */ info = CapiFacilityNotSupported; confparam[0] = 2; /* length */ /* DTMF information: Unknown DTMF request */ capimsg_setu16(confparam, 1, 2); break; case CAPI_FACILITY_V42BIS: /* not supported */ info = CapiFacilityNotSupported; confparam[0] = 2; /* length */ /* V.42 bis information: not available */ capimsg_setu16(confparam, 1, 1); break; case CAPI_FACILITY_SUPPSVC: /* decode Function parameter */ pparam = cmsg->FacilityRequestParameter; if (pparam == NULL || pparam[0] < 2) { dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ", "Facility Request Parameter"); send_conf(iif, ap, skb, CapiIllMessageParmCoding); return; } function = CAPIMSG_U16(pparam, 1); switch (function) { case CAPI_SUPPSVC_GETSUPPORTED: info = CapiSuccess; /* Supplementary Service specific parameter */ confparam[3] = 6; /* length */ /* Supplementary services info: Success */ capimsg_setu16(confparam, 4, CapiSuccess); /* Supported Services: none */ capimsg_setu32(confparam, 6, 0); break; case CAPI_SUPPSVC_LISTEN: if (pparam[0] < 7 || pparam[3] < 4) { dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ", "Notification Mask"); send_conf(iif, ap, skb, CapiIllMessageParmCoding); return; } if (CAPIMSG_U32(pparam, 4) != 0) { dev_notice(cs->dev, "%s: unsupported supplementary service notification mask 0x%x\n", "FACILITY_REQ", CAPIMSG_U32(pparam, 4)); info = CapiFacilitySpecificFunctionNotSupported; confparam[3] = 2; /* length */ 
capimsg_setu16(confparam, 4, CapiSupplementaryServiceNotSupported); } info = CapiSuccess; confparam[3] = 2; /* length */ capimsg_setu16(confparam, 4, CapiSuccess); break; /* ToDo: add supported services */ default: dev_notice(cs->dev, "%s: unsupported supplementary service function 0x%04x\n", "FACILITY_REQ", function); info = CapiFacilitySpecificFunctionNotSupported; /* Supplementary Service specific parameter */ confparam[3] = 2; /* length */ /* Supplementary services info: not supported */ capimsg_setu16(confparam, 4, CapiSupplementaryServiceNotSupported); } /* Facility confirmation parameter */ confparam[0] = confparam[3] + 3; /* total length */ /* Function: copy from _REQ message */ capimsg_setu16(confparam, 1, function); /* Supplementary Service specific parameter already set above */ break; case CAPI_FACILITY_WAKEUP: /* ToDo */ info = CapiFacilityNotSupported; confparam[0] = 2; /* length */ /* Number of accepted awake request parameters: 0 */ capimsg_setu16(confparam, 1, 0); break; default: info = CapiFacilityNotSupported; confparam[0] = 0; /* empty struct */ } /* send FACILITY_CONF with given Info and confirmation parameter */ capi_cmsg_answer(cmsg); cmsg->Info = info; cmsg->FacilityConfirmationParameter = confparam; msgsize += confparam[0]; /* length */ cskb = alloc_skb(msgsize, GFP_ATOMIC); if (!cskb) { dev_err(cs->dev, "%s: out of memory\n", __func__); return; } capi_cmsg2message(cmsg, __skb_put(cskb, msgsize)); dump_cmsg(DEBUG_CMD, __func__, cmsg); capi_ctr_handle_message(&iif->ctr, ap->id, cskb); } /* * process LISTEN_REQ message * just store the masks in the application data structure */ static void do_listen_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { /* decode message */ capi_message2cmsg(&iif->acmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); /* store listening parameters */ ap->listenInfoMask = iif->acmsg.InfoMask; ap->listenCIPmask = iif->acmsg.CIPmask; send_conf(iif, ap, skb, CapiSuccess); 
} /* * process ALERT_REQ message * nothing to do, Gigaset always alerts anyway */ static void do_alert_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { /* decode message */ capi_message2cmsg(&iif->acmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); send_conf(iif, ap, skb, CapiAlertAlreadySent); } /* * process CONNECT_REQ message * allocate a B channel, prepare dial commands, queue a DIAL event, * emit CONNECT_CONF reply */ static void do_connect_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { struct cardstate *cs = iif->ctr.driverdata; _cmsg *cmsg = &iif->acmsg; struct bc_state *bcs; char **commands; char *s; u8 *pp; unsigned long flags; int i, l, lbc, lhlc; u16 info; /* decode message */ capi_message2cmsg(cmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, cmsg); /* get free B channel & construct PLCI */ bcs = gigaset_get_free_channel(cs); if (!bcs) { dev_notice(cs->dev, "%s: no B channel available\n", "CONNECT_REQ"); send_conf(iif, ap, skb, CapiNoPlciAvailable); return; } spin_lock_irqsave(&bcs->aplock, flags); if (bcs->ap != NULL || bcs->apconnstate != APCONN_NONE) dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d)\n", __func__, bcs->ap, bcs->apconnstate); ap->bcnext = NULL; bcs->ap = ap; bcs->apconnstate = APCONN_SETUP; spin_unlock_irqrestore(&bcs->aplock, flags); bcs->rx_bufsize = ap->rp.datablklen; dev_kfree_skb(bcs->rx_skb); gigaset_new_rx_skb(bcs); cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8; /* build command table */ commands = kzalloc(AT_NUM*(sizeof *commands), GFP_KERNEL); if (!commands) goto oom; /* encode parameter: Called party number */ pp = cmsg->CalledPartyNumber; if (pp == NULL || *pp == 0) { dev_notice(cs->dev, "%s: %s missing\n", "CONNECT_REQ", "Called party number"); info = CapiIllMessageParmCoding; goto error; } l = *pp++; /* check type of number/numbering plan byte */ switch (*pp) { case 0x80: /* unknown type / unknown numbering plan */ case 0x81: 
/* unknown type / ISDN/Telephony numbering plan */ break; default: /* others: warn about potential misinterpretation */ dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n", "CONNECT_REQ", "Called party number", *pp); } pp++; l--; /* translate "**" internal call prefix to CTP value */ if (l >= 2 && pp[0] == '*' && pp[1] == '*') { s = "^SCTP=0\r"; pp += 2; l -= 2; } else { s = "^SCTP=1\r"; } commands[AT_TYPE] = kstrdup(s, GFP_KERNEL); if (!commands[AT_TYPE]) goto oom; commands[AT_DIAL] = kmalloc(l+3, GFP_KERNEL); if (!commands[AT_DIAL]) goto oom; snprintf(commands[AT_DIAL], l+3, "D%.*s\r", l, pp); /* encode parameter: Calling party number */ pp = cmsg->CallingPartyNumber; if (pp != NULL && *pp > 0) { l = *pp++; /* check type of number/numbering plan byte */ /* ToDo: allow for/handle Ext=1? */ switch (*pp) { case 0x00: /* unknown type / unknown numbering plan */ case 0x01: /* unknown type / ISDN/Telephony num. plan */ break; default: dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n", "CONNECT_REQ", "Calling party number", *pp); } pp++; l--; /* check presentation indicator */ if (!l) { dev_notice(cs->dev, "%s: %s IE truncated\n", "CONNECT_REQ", "Calling party number"); info = CapiIllMessageParmCoding; goto error; } switch (*pp & 0xfc) { /* ignore Screening indicator */ case 0x80: /* Presentation allowed */ s = "^SCLIP=1\r"; break; case 0xa0: /* Presentation restricted */ s = "^SCLIP=0\r"; break; default: dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", "CONNECT_REQ", "Presentation/Screening indicator", *pp); s = "^SCLIP=1\r"; } commands[AT_CLIP] = kstrdup(s, GFP_KERNEL); if (!commands[AT_CLIP]) goto oom; pp++; l--; if (l) { /* number */ commands[AT_MSN] = kmalloc(l+8, GFP_KERNEL); if (!commands[AT_MSN]) goto oom; snprintf(commands[AT_MSN], l+8, "^SMSN=%*s\r", l, pp); } } /* check parameter: CIP Value */ if (cmsg->CIPValue >= ARRAY_SIZE(cip2bchlc) || (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) { dev_notice(cs->dev, "%s: unknown CIP 
value %d\n", "CONNECT_REQ", cmsg->CIPValue); info = CapiCipValueUnknown; goto error; } /* * check/encode parameters: BC & HLC * must be encoded together as device doesn't accept HLC separately * explicit parameters override values derived from CIP */ /* determine lengths */ if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */ lbc = 2*cmsg->BC[0]; else if (cip2bchlc[cmsg->CIPValue].bc) /* BC derived from CIP */ lbc = strlen(cip2bchlc[cmsg->CIPValue].bc); else /* no BC */ lbc = 0; if (cmsg->HLC && cmsg->HLC[0]) /* HLC specified explicitly */ lhlc = 2*cmsg->HLC[0]; else if (cip2bchlc[cmsg->CIPValue].hlc) /* HLC derived from CIP */ lhlc = strlen(cip2bchlc[cmsg->CIPValue].hlc); else /* no HLC */ lhlc = 0; if (lbc) { /* have BC: allocate and assemble command string */ l = lbc + 7; /* "^SBC=" + value + "\r" + null byte */ if (lhlc) l += lhlc + 7; /* ";^SHLC=" + value */ commands[AT_BC] = kmalloc(l, GFP_KERNEL); if (!commands[AT_BC]) goto oom; strcpy(commands[AT_BC], "^SBC="); if (cmsg->BC && cmsg->BC[0]) /* BC specified explicitly */ decode_ie(cmsg->BC, commands[AT_BC] + 5); else /* BC derived from CIP */ strcpy(commands[AT_BC] + 5, cip2bchlc[cmsg->CIPValue].bc); if (lhlc) { strcpy(commands[AT_BC] + lbc + 5, ";^SHLC="); if (cmsg->HLC && cmsg->HLC[0]) /* HLC specified explicitly */ decode_ie(cmsg->HLC, commands[AT_BC] + lbc + 12); else /* HLC derived from CIP */ strcpy(commands[AT_BC] + lbc + 12, cip2bchlc[cmsg->CIPValue].hlc); } strcpy(commands[AT_BC] + l - 2, "\r"); } else { /* no BC */ if (lhlc) { dev_notice(cs->dev, "%s: cannot set HLC without BC\n", "CONNECT_REQ"); info = CapiIllMessageParmCoding; /* ? 
*/ goto error; } } /* check/encode parameter: B Protocol */ if (cmsg->BProtocol == CAPI_DEFAULT) { bcs->proto2 = L2_HDLC; dev_warn(cs->dev, "B2 Protocol X.75 SLP unsupported, using Transparent\n"); } else { switch (cmsg->B1protocol) { case 0: bcs->proto2 = L2_HDLC; break; case 1: bcs->proto2 = L2_VOICE; break; default: dev_warn(cs->dev, "B1 Protocol %u unsupported, using Transparent\n", cmsg->B1protocol); bcs->proto2 = L2_VOICE; } if (cmsg->B2protocol != 1) dev_warn(cs->dev, "B2 Protocol %u unsupported, using Transparent\n", cmsg->B2protocol); if (cmsg->B3protocol != 0) dev_warn(cs->dev, "B3 Protocol %u unsupported, using Transparent\n", cmsg->B3protocol); ignore_cstruct_param(cs, cmsg->B1configuration, "CONNECT_REQ", "B1 Configuration"); ignore_cstruct_param(cs, cmsg->B2configuration, "CONNECT_REQ", "B2 Configuration"); ignore_cstruct_param(cs, cmsg->B3configuration, "CONNECT_REQ", "B3 Configuration"); } commands[AT_PROTO] = kmalloc(9, GFP_KERNEL); if (!commands[AT_PROTO]) goto oom; snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2); /* ToDo: check/encode remaining parameters */ ignore_cstruct_param(cs, cmsg->CalledPartySubaddress, "CONNECT_REQ", "Called pty subaddr"); ignore_cstruct_param(cs, cmsg->CallingPartySubaddress, "CONNECT_REQ", "Calling pty subaddr"); ignore_cstruct_param(cs, cmsg->LLC, "CONNECT_REQ", "LLC"); if (cmsg->AdditionalInfo != CAPI_DEFAULT) { ignore_cstruct_param(cs, cmsg->BChannelinformation, "CONNECT_REQ", "B Channel Information"); ignore_cstruct_param(cs, cmsg->Keypadfacility, "CONNECT_REQ", "Keypad Facility"); ignore_cstruct_param(cs, cmsg->Useruserdata, "CONNECT_REQ", "User-User Data"); ignore_cstruct_param(cs, cmsg->Facilitydataarray, "CONNECT_REQ", "Facility Data Array"); } /* encode parameter: B channel to use */ commands[AT_ISO] = kmalloc(9, GFP_KERNEL); if (!commands[AT_ISO]) goto oom; snprintf(commands[AT_ISO], 9, "^SISO=%u\r", (unsigned) bcs->channel + 1); /* queue & schedule EV_DIAL event */ if (!gigaset_add_event(cs, 
&bcs->at_state, EV_DIAL, commands, bcs->at_state.seq_index, NULL)) { info = CAPI_MSGOSRESOURCEERR; goto error; } gigaset_schedule_event(cs); send_conf(iif, ap, skb, CapiSuccess); return; oom: dev_err(cs->dev, "%s: out of memory\n", __func__); info = CAPI_MSGOSRESOURCEERR; error: if (commands) for (i = 0; i < AT_NUM; i++) kfree(commands[i]); kfree(commands); gigaset_free_channel(bcs); send_conf(iif, ap, skb, info); } /* * process CONNECT_RESP message * checks protocol parameters and queues an ACCEPT or HUP event */ static void do_connect_resp(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { struct cardstate *cs = iif->ctr.driverdata; _cmsg *cmsg = &iif->acmsg; struct bc_state *bcs; struct gigaset_capi_appl *oap; unsigned long flags; int channel; /* decode message */ capi_message2cmsg(cmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, cmsg); dev_kfree_skb_any(skb); /* extract and check channel number from PLCI */ channel = (cmsg->adr.adrPLCI >> 8) & 0xff; if (!channel || channel > cs->channels) { dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", "CONNECT_RESP", "PLCI", cmsg->adr.adrPLCI); return; } bcs = cs->bcs + channel - 1; switch (cmsg->Reject) { case 0: /* Accept */ /* drop all competing applications, keep only this one */ spin_lock_irqsave(&bcs->aplock, flags); while (bcs->ap != NULL) { oap = bcs->ap; bcs->ap = oap->bcnext; if (oap != ap) { spin_unlock_irqrestore(&bcs->aplock, flags); send_disconnect_ind(bcs, oap, CapiCallGivenToOtherApplication); spin_lock_irqsave(&bcs->aplock, flags); } } ap->bcnext = NULL; bcs->ap = ap; spin_unlock_irqrestore(&bcs->aplock, flags); bcs->rx_bufsize = ap->rp.datablklen; dev_kfree_skb(bcs->rx_skb); gigaset_new_rx_skb(bcs); bcs->chstate |= CHS_NOTIFY_LL; /* check/encode B channel protocol */ if (cmsg->BProtocol == CAPI_DEFAULT) { bcs->proto2 = L2_HDLC; dev_warn(cs->dev, "B2 Protocol X.75 SLP unsupported, using Transparent\n"); } else { switch (cmsg->B1protocol) { case 0: bcs->proto2 = L2_HDLC; break; 
case 1: bcs->proto2 = L2_VOICE; break; default: dev_warn(cs->dev, "B1 Protocol %u unsupported, using Transparent\n", cmsg->B1protocol); bcs->proto2 = L2_VOICE; } if (cmsg->B2protocol != 1) dev_warn(cs->dev, "B2 Protocol %u unsupported, using Transparent\n", cmsg->B2protocol); if (cmsg->B3protocol != 0) dev_warn(cs->dev, "B3 Protocol %u unsupported, using Transparent\n", cmsg->B3protocol); ignore_cstruct_param(cs, cmsg->B1configuration, "CONNECT_RESP", "B1 Configuration"); ignore_cstruct_param(cs, cmsg->B2configuration, "CONNECT_RESP", "B2 Configuration"); ignore_cstruct_param(cs, cmsg->B3configuration, "CONNECT_RESP", "B3 Configuration"); } /* ToDo: check/encode remaining parameters */ ignore_cstruct_param(cs, cmsg->ConnectedNumber, "CONNECT_RESP", "Connected Number"); ignore_cstruct_param(cs, cmsg->ConnectedSubaddress, "CONNECT_RESP", "Connected Subaddress"); ignore_cstruct_param(cs, cmsg->LLC, "CONNECT_RESP", "LLC"); if (cmsg->AdditionalInfo != CAPI_DEFAULT) { ignore_cstruct_param(cs, cmsg->BChannelinformation, "CONNECT_RESP", "BChannel Information"); ignore_cstruct_param(cs, cmsg->Keypadfacility, "CONNECT_RESP", "Keypad Facility"); ignore_cstruct_param(cs, cmsg->Useruserdata, "CONNECT_RESP", "User-User Data"); ignore_cstruct_param(cs, cmsg->Facilitydataarray, "CONNECT_RESP", "Facility Data Array"); } /* Accept call */ if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state, EV_ACCEPT, NULL, 0, NULL)) return; gigaset_schedule_event(cs); return; case 1: /* Ignore */ /* send DISCONNECT_IND to this application */ send_disconnect_ind(bcs, ap, 0); /* remove it from the list of listening apps */ spin_lock_irqsave(&bcs->aplock, flags); if (bcs->ap == ap) { bcs->ap = ap->bcnext; if (bcs->ap == NULL) { /* last one: stop ev-layer hupD notifications */ bcs->apconnstate = APCONN_NONE; bcs->chstate &= ~CHS_NOTIFY_LL; } spin_unlock_irqrestore(&bcs->aplock, flags); return; } for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) { if (oap->bcnext == ap) { oap->bcnext = 
oap->bcnext->bcnext; spin_unlock_irqrestore(&bcs->aplock, flags); return; } } spin_unlock_irqrestore(&bcs->aplock, flags); dev_err(cs->dev, "%s: application %u not found\n", __func__, ap->id); return; default: /* Reject */ /* drop all competing applications, keep only this one */ spin_lock_irqsave(&bcs->aplock, flags); while (bcs->ap != NULL) { oap = bcs->ap; bcs->ap = oap->bcnext; if (oap != ap) { spin_unlock_irqrestore(&bcs->aplock, flags); send_disconnect_ind(bcs, oap, CapiCallGivenToOtherApplication); spin_lock_irqsave(&bcs->aplock, flags); } } ap->bcnext = NULL; bcs->ap = ap; spin_unlock_irqrestore(&bcs->aplock, flags); /* reject call - will trigger DISCONNECT_IND for this app */ dev_info(cs->dev, "%s: Reject=%x\n", "CONNECT_RESP", cmsg->Reject); if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state, EV_HUP, NULL, 0, NULL)) return; gigaset_schedule_event(cs); return; } } /* * process CONNECT_B3_REQ message * build NCCI and emit CONNECT_B3_CONF reply */ static void do_connect_b3_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { struct cardstate *cs = iif->ctr.driverdata; _cmsg *cmsg = &iif->acmsg; struct bc_state *bcs; int channel; /* decode message */ capi_message2cmsg(cmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, cmsg); /* extract and check channel number from PLCI */ channel = (cmsg->adr.adrPLCI >> 8) & 0xff; if (!channel || channel > cs->channels) { dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI); send_conf(iif, ap, skb, CapiIllContrPlciNcci); return; } bcs = &cs->bcs[channel-1]; /* mark logical connection active */ bcs->apconnstate = APCONN_ACTIVE; /* build NCCI: always 1 (one B3 connection only) */ cmsg->adr.adrNCCI |= 1 << 16; /* NCPI parameter: not applicable for B3 Transparent */ ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI"); send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ? 
CapiNcpiNotSupportedByProtocol : CapiSuccess); } /* * process CONNECT_B3_RESP message * Depending on the Reject parameter, either emit CONNECT_B3_ACTIVE_IND * or queue EV_HUP and emit DISCONNECT_B3_IND. * The emitted message is always shorter than the received one, * allowing to reuse the skb. */ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { struct cardstate *cs = iif->ctr.driverdata; _cmsg *cmsg = &iif->acmsg; struct bc_state *bcs; int channel; unsigned int msgsize; u8 command; /* decode message */ capi_message2cmsg(cmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, cmsg); /* extract and check channel number and NCCI */ channel = (cmsg->adr.adrNCCI >> 8) & 0xff; if (!channel || channel > cs->channels || ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) { dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI); dev_kfree_skb_any(skb); return; } bcs = &cs->bcs[channel-1]; if (cmsg->Reject) { /* Reject: clear B3 connect received flag */ bcs->apconnstate = APCONN_SETUP; /* trigger hangup, causing eventual DISCONNECT_IND */ if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) { dev_kfree_skb_any(skb); return; } gigaset_schedule_event(cs); /* emit DISCONNECT_B3_IND */ command = CAPI_DISCONNECT_B3; msgsize = CAPI_DISCONNECT_B3_IND_BASELEN; } else { /* * Accept: emit CONNECT_B3_ACTIVE_IND immediately, as * we only send CONNECT_B3_IND if the B channel is up */ command = CAPI_CONNECT_B3_ACTIVE; msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN; } capi_cmsg_header(cmsg, ap->id, command, CAPI_IND, ap->nextMessageNumber++, cmsg->adr.adrNCCI); __skb_trim(skb, msgsize); capi_cmsg2message(cmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, cmsg); capi_ctr_handle_message(&iif->ctr, ap->id, skb); } /* * process DISCONNECT_REQ message * schedule EV_HUP and emit DISCONNECT_B3_IND if necessary, * emit DISCONNECT_CONF reply */ static void do_disconnect_req(struct gigaset_capi_ctr *iif, 
struct gigaset_capi_appl *ap, struct sk_buff *skb) { struct cardstate *cs = iif->ctr.driverdata; _cmsg *cmsg = &iif->acmsg; struct bc_state *bcs; _cmsg *b3cmsg; struct sk_buff *b3skb; int channel; /* decode message */ capi_message2cmsg(cmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, cmsg); /* extract and check channel number from PLCI */ channel = (cmsg->adr.adrPLCI >> 8) & 0xff; if (!channel || channel > cs->channels) { dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI); send_conf(iif, ap, skb, CapiIllContrPlciNcci); return; } bcs = cs->bcs + channel - 1; /* ToDo: process parameter: Additional info */ if (cmsg->AdditionalInfo != CAPI_DEFAULT) { ignore_cstruct_param(cs, cmsg->BChannelinformation, "DISCONNECT_REQ", "B Channel Information"); ignore_cstruct_param(cs, cmsg->Keypadfacility, "DISCONNECT_REQ", "Keypad Facility"); ignore_cstruct_param(cs, cmsg->Useruserdata, "DISCONNECT_REQ", "User-User Data"); ignore_cstruct_param(cs, cmsg->Facilitydataarray, "DISCONNECT_REQ", "Facility Data Array"); } /* skip if DISCONNECT_IND already sent */ if (!bcs->apconnstate) return; /* check for active logical connection */ if (bcs->apconnstate >= APCONN_ACTIVE) { /* * emit DISCONNECT_B3_IND with cause 0x3301 * use separate cmsg structure, as the content of iif->acmsg * is still needed for creating the _CONF message */ b3cmsg = kmalloc(sizeof(*b3cmsg), GFP_KERNEL); if (!b3cmsg) { dev_err(cs->dev, "%s: out of memory\n", __func__); send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); return; } capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND, ap->nextMessageNumber++, cmsg->adr.adrPLCI | (1 << 16)); b3cmsg->Reason_B3 = CapiProtocolErrorLayer1; b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL); if (b3skb == NULL) { dev_err(cs->dev, "%s: out of memory\n", __func__); send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); kfree(b3cmsg); return; } capi_cmsg2message(b3cmsg, __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN)); 
kfree(b3cmsg); capi_ctr_handle_message(&iif->ctr, ap->id, b3skb); } /* trigger hangup, causing eventual DISCONNECT_IND */ if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) { send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); return; } gigaset_schedule_event(cs); /* emit reply */ send_conf(iif, ap, skb, CapiSuccess); } /* * process DISCONNECT_B3_REQ message * schedule EV_HUP and emit DISCONNECT_B3_CONF reply */ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { struct cardstate *cs = iif->ctr.driverdata; _cmsg *cmsg = &iif->acmsg; struct bc_state *bcs; int channel; /* decode message */ capi_message2cmsg(cmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, cmsg); /* extract and check channel number and NCCI */ channel = (cmsg->adr.adrNCCI >> 8) & 0xff; if (!channel || channel > cs->channels || ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) { dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI); send_conf(iif, ap, skb, CapiIllContrPlciNcci); return; } bcs = &cs->bcs[channel-1]; /* reject if logical connection not active */ if (bcs->apconnstate < APCONN_ACTIVE) { send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); return; } /* trigger hangup, causing eventual DISCONNECT_B3_IND */ if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) { send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); return; } gigaset_schedule_event(cs); /* NCPI parameter: not applicable for B3 Transparent */ ignore_cstruct_param(cs, cmsg->NCPI, "DISCONNECT_B3_REQ", "NCPI"); send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ? 
CapiNcpiNotSupportedByProtocol : CapiSuccess); } /* * process DATA_B3_REQ message */ static void do_data_b3_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { struct cardstate *cs = iif->ctr.driverdata; struct bc_state *bcs; int channel = CAPIMSG_PLCI_PART(skb->data); u16 ncci = CAPIMSG_NCCI_PART(skb->data); u16 msglen = CAPIMSG_LEN(skb->data); u16 datalen = CAPIMSG_DATALEN(skb->data); u16 flags = CAPIMSG_FLAGS(skb->data); u16 msgid = CAPIMSG_MSGID(skb->data); u16 handle = CAPIMSG_HANDLE_REQ(skb->data); /* frequent message, avoid _cmsg overhead */ dump_rawmsg(DEBUG_MCMD, __func__, skb->data); /* check parameters */ if (channel == 0 || channel > cs->channels || ncci != 1) { dev_notice(cs->dev, "%s: invalid %s 0x%02x\n", "DATA_B3_REQ", "NCCI", CAPIMSG_NCCI(skb->data)); send_conf(iif, ap, skb, CapiIllContrPlciNcci); return; } bcs = &cs->bcs[channel-1]; if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64) dev_notice(cs->dev, "%s: unexpected length %d\n", "DATA_B3_REQ", msglen); if (msglen + datalen != skb->len) dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d)\n", "DATA_B3_REQ", msglen, datalen, skb->len); if (msglen + datalen > skb->len) { /* message too short for announced data length */ send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? 
*/ return; } if (flags & CAPI_FLAGS_RESERVED) { dev_notice(cs->dev, "%s: reserved flags set (%x)\n", "DATA_B3_REQ", flags); send_conf(iif, ap, skb, CapiIllMessageParmCoding); return; } /* reject if logical connection not active */ if (bcs->apconnstate < APCONN_ACTIVE) { send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); return; } /* pull CAPI message into link layer header */ skb_reset_mac_header(skb); skb->mac_len = msglen; skb_pull(skb, msglen); /* pass to device-specific module */ if (cs->ops->send_skb(bcs, skb) < 0) { send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR); return; } /* * DATA_B3_CONF will be sent by gigaset_skb_sent() only if "delivery * confirmation" bit is set; otherwise we have to send it now */ if (!(flags & CAPI_FLAGS_DELIVERY_CONFIRMATION)) send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle, flags ? CapiFlagsNotSupportedByProtocol : CAPI_NOERROR); } /* * process RESET_B3_REQ message * just always reply "not supported by current protocol" */ static void do_reset_b3_req(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { /* decode message */ capi_message2cmsg(&iif->acmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); send_conf(iif, ap, skb, CapiResetProcedureNotSupportedByCurrentProtocol); } /* * dump unsupported/ignored messages at most twice per minute, * some apps send those very frequently */ static unsigned long ignored_msg_dump_time; /* * unsupported CAPI message handler */ static void do_unsupported(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { /* decode message */ capi_message2cmsg(&iif->acmsg, skb->data); if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); } /* * CAPI message handler: no-op */ static void do_nothing(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { if 
(printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) { /* decode message */ capi_message2cmsg(&iif->acmsg, skb->data); dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg); } dev_kfree_skb_any(skb); } static void do_data_b3_resp(struct gigaset_capi_ctr *iif, struct gigaset_capi_appl *ap, struct sk_buff *skb) { dump_rawmsg(DEBUG_MCMD, __func__, skb->data); dev_kfree_skb_any(skb); } /* table of outgoing CAPI message handlers with lookup function */ typedef void (*capi_send_handler_t)(struct gigaset_capi_ctr *, struct gigaset_capi_appl *, struct sk_buff *); static struct { u16 cmd; capi_send_handler_t handler; } capi_send_handler_table[] = { /* most frequent messages first for faster lookup */ { CAPI_DATA_B3_REQ, do_data_b3_req }, { CAPI_DATA_B3_RESP, do_data_b3_resp }, { CAPI_ALERT_REQ, do_alert_req }, { CAPI_CONNECT_ACTIVE_RESP, do_nothing }, { CAPI_CONNECT_B3_ACTIVE_RESP, do_nothing }, { CAPI_CONNECT_B3_REQ, do_connect_b3_req }, { CAPI_CONNECT_B3_RESP, do_connect_b3_resp }, { CAPI_CONNECT_B3_T90_ACTIVE_RESP, do_nothing }, { CAPI_CONNECT_REQ, do_connect_req }, { CAPI_CONNECT_RESP, do_connect_resp }, { CAPI_DISCONNECT_B3_REQ, do_disconnect_b3_req }, { CAPI_DISCONNECT_B3_RESP, do_nothing }, { CAPI_DISCONNECT_REQ, do_disconnect_req }, { CAPI_DISCONNECT_RESP, do_nothing }, { CAPI_FACILITY_REQ, do_facility_req }, { CAPI_FACILITY_RESP, do_nothing }, { CAPI_LISTEN_REQ, do_listen_req }, { CAPI_SELECT_B_PROTOCOL_REQ, do_unsupported }, { CAPI_RESET_B3_REQ, do_reset_b3_req }, { CAPI_RESET_B3_RESP, do_nothing }, /* * ToDo: support overlap sending (requires ev-layer state * machine extension to generate additional ATD commands) */ { CAPI_INFO_REQ, do_unsupported }, { CAPI_INFO_RESP, do_nothing }, /* * ToDo: what's the proper response for these? 
*/ { CAPI_MANUFACTURER_REQ, do_nothing }, { CAPI_MANUFACTURER_RESP, do_nothing }, }; /* look up handler */ static inline capi_send_handler_t lookup_capi_send_handler(const u16 cmd) { size_t i; for (i = 0; i < ARRAY_SIZE(capi_send_handler_table); i++) if (capi_send_handler_table[i].cmd == cmd) return capi_send_handler_table[i].handler; return NULL; } /** * gigaset_send_message() - accept a CAPI message from an application * @ctr: controller descriptor structure. * @skb: CAPI message. * * Return value: CAPI error code * Note: capidrv (and probably others, too) only uses the return value to * decide whether it has to free the skb (only if result != CAPI_NOERROR (0)) */ static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb) { struct gigaset_capi_ctr *iif = container_of(ctr, struct gigaset_capi_ctr, ctr); struct cardstate *cs = ctr->driverdata; struct gigaset_capi_appl *ap; capi_send_handler_t handler; /* can only handle linear sk_buffs */ if (skb_linearize(skb) < 0) { dev_warn(cs->dev, "%s: skb_linearize failed\n", __func__); return CAPI_MSGOSRESOURCEERR; } /* retrieve application data structure */ ap = get_appl(iif, CAPIMSG_APPID(skb->data)); if (!ap) { dev_notice(cs->dev, "%s: application %u not registered\n", __func__, CAPIMSG_APPID(skb->data)); return CAPI_ILLAPPNR; } /* look up command */ handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data)); if (!handler) { /* unknown/unsupported message type */ if (printk_ratelimit()) dev_notice(cs->dev, "%s: unsupported message %u\n", __func__, CAPIMSG_CMD(skb->data)); return CAPI_ILLCMDORSUBCMDORMSGTOSMALL; } /* serialize */ if (atomic_add_return(1, &iif->sendqlen) > 1) { /* queue behind other messages */ skb_queue_tail(&iif->sendqueue, skb); return CAPI_NOERROR; } /* process message */ handler(iif, ap, skb); /* process other messages arrived in the meantime */ while (atomic_sub_return(1, &iif->sendqlen) > 0) { skb = skb_dequeue(&iif->sendqueue); if (!skb) { /* should never happen */ dev_err(cs->dev, 
"%s: send queue empty\n", __func__); continue; } ap = get_appl(iif, CAPIMSG_APPID(skb->data)); if (!ap) { /* could that happen? */ dev_warn(cs->dev, "%s: application %u vanished\n", __func__, CAPIMSG_APPID(skb->data)); continue; } handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data)); if (!handler) { /* should never happen */ dev_err(cs->dev, "%s: handler %x vanished\n", __func__, CAPIMSG_CMD(skb->data)); continue; } handler(iif, ap, skb); } return CAPI_NOERROR; } /** * gigaset_procinfo() - build single line description for controller * @ctr: controller descriptor structure. * * Return value: pointer to generated string (null terminated) */ static char *gigaset_procinfo(struct capi_ctr *ctr) { return ctr->name; /* ToDo: more? */ } static int gigaset_proc_show(struct seq_file *m, void *v) { struct capi_ctr *ctr = m->private; struct cardstate *cs = ctr->driverdata; char *s; int i; seq_printf(m, "%-16s %s\n", "name", ctr->name); seq_printf(m, "%-16s %s %s\n", "dev", dev_driver_string(cs->dev), dev_name(cs->dev)); seq_printf(m, "%-16s %d\n", "id", cs->myid); if (cs->gotfwver) seq_printf(m, "%-16s %d.%d.%d.%d\n", "firmware", cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]); seq_printf(m, "%-16s %d\n", "channels", cs->channels); seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no"); switch (cs->mode) { case M_UNKNOWN: s = "unknown"; break; case M_CONFIG: s = "config"; break; case M_UNIMODEM: s = "Unimodem"; break; case M_CID: s = "CID"; break; default: s = "??"; } seq_printf(m, "%-16s %s\n", "mode", s); switch (cs->mstate) { case MS_UNINITIALIZED: s = "uninitialized"; break; case MS_INIT: s = "init"; break; case MS_LOCKED: s = "locked"; break; case MS_SHUTDOWN: s = "shutdown"; break; case MS_RECOVER: s = "recover"; break; case MS_READY: s = "ready"; break; default: s = "??"; } seq_printf(m, "%-16s %s\n", "mstate", s); seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no"); seq_printf(m, "%-16s %s\n", "connected", cs->connected ? 
"yes" : "no"); seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no"); seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no"); for (i = 0; i < cs->channels; i++) { seq_printf(m, "[%d]%-13s %d\n", i, "corrupted", cs->bcs[i].corrupted); seq_printf(m, "[%d]%-13s %d\n", i, "trans_down", cs->bcs[i].trans_down); seq_printf(m, "[%d]%-13s %d\n", i, "trans_up", cs->bcs[i].trans_up); seq_printf(m, "[%d]%-13s %d\n", i, "chstate", cs->bcs[i].chstate); switch (cs->bcs[i].proto2) { case L2_BITSYNC: s = "bitsync"; break; case L2_HDLC: s = "HDLC"; break; case L2_VOICE: s = "voice"; break; default: s = "??"; } seq_printf(m, "[%d]%-13s %s\n", i, "proto2", s); } return 0; } static int gigaset_proc_open(struct inode *inode, struct file *file) { return single_open(file, gigaset_proc_show, PDE(inode)->data); } static const struct file_operations gigaset_proc_fops = { .owner = THIS_MODULE, .open = gigaset_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /** * gigaset_isdn_regdev() - register device to LL * @cs: device descriptor structure. * @isdnid: device name. 
* * Return value: 1 for success, 0 for failure */ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) { struct gigaset_capi_ctr *iif; int rc; iif = kmalloc(sizeof(*iif), GFP_KERNEL); if (!iif) { pr_err("%s: out of memory\n", __func__); return 0; } /* prepare controller structure */ iif->ctr.owner = THIS_MODULE; iif->ctr.driverdata = cs; strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name)); iif->ctr.driver_name = "gigaset"; iif->ctr.load_firmware = NULL; iif->ctr.reset_ctr = NULL; iif->ctr.register_appl = gigaset_register_appl; iif->ctr.release_appl = gigaset_release_appl; iif->ctr.send_message = gigaset_send_message; iif->ctr.procinfo = gigaset_procinfo; iif->ctr.proc_fops = &gigaset_proc_fops; INIT_LIST_HEAD(&iif->appls); skb_queue_head_init(&iif->sendqueue); atomic_set(&iif->sendqlen, 0); /* register controller with CAPI */ rc = attach_capi_ctr(&iif->ctr); if (rc) { pr_err("attach_capi_ctr failed (%d)\n", rc); kfree(iif); return 0; } cs->iif = iif; cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN; return 1; } /** * gigaset_isdn_unregdev() - unregister device from LL * @cs: device descriptor structure. */ void gigaset_isdn_unregdev(struct cardstate *cs) { struct gigaset_capi_ctr *iif = cs->iif; detach_capi_ctr(&iif->ctr); kfree(iif); cs->iif = NULL; } static struct capi_driver capi_driver_gigaset = { .name = "gigaset", .revision = "1.0", }; /** * gigaset_isdn_regdrv() - register driver to LL */ void gigaset_isdn_regdrv(void) { pr_info("Kernel CAPI interface\n"); register_capi_driver(&capi_driver_gigaset); } /** * gigaset_isdn_unregdrv() - unregister driver from LL */ void gigaset_isdn_unregdrv(void) { unregister_capi_driver(&capi_driver_gigaset); }
gpl-2.0
rfbsoft/polievanie_kernel
sound/soc/codecs/wm9090.c
2722
21211
/* * ALSA SoC WM9090 driver * * Copyright 2009-12 Wolfson Microelectronics * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/module.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/regmap.h> #include <linux/slab.h> #include <sound/initval.h> #include <sound/soc.h> #include <sound/tlv.h> #include <sound/wm9090.h> #include "wm9090.h" static const struct reg_default wm9090_reg_defaults[] = { { 1, 0x0006 }, /* R1 - Power Management (1) */ { 2, 0x6000 }, /* R2 - Power Management (2) */ { 3, 0x0000 }, /* R3 - Power Management (3) */ { 6, 0x01C0 }, /* R6 - Clocking 1 */ { 22, 0x0003 }, /* R22 - IN1 Line Control */ { 23, 0x0003 }, /* R23 - IN2 Line Control */ { 24, 0x0083 }, /* R24 - IN1 Line Input A Volume */ { 25, 0x0083 }, /* R25 - IN1 Line Input B Volume */ { 26, 0x0083 }, /* R26 - IN2 Line Input A Volume */ { 27, 0x0083 }, /* R27 - IN2 Line Input B Volume */ { 28, 0x002D }, /* R28 - Left Output Volume */ { 29, 0x002D }, /* R29 - Right Output Volume */ { 34, 0x0100 }, /* R34 - SPKMIXL Attenuation */ { 35, 0x0010 }, /* R36 - SPKOUT Mixers */ { 37, 0x0140 }, /* R37 - ClassD3 */ { 38, 0x0039 }, /* R38 - Speaker Volume Left */ { 45, 0x0000 }, /* R45 - Output Mixer1 */ { 46, 0x0000 }, /* R46 - Output Mixer2 */ { 47, 0x0100 }, /* R47 - Output Mixer3 */ 
{ 48, 0x0100 }, /* R48 - Output Mixer4 */ { 54, 0x0000 }, /* R54 - Speaker Mixer */ { 57, 0x000D }, /* R57 - AntiPOP2 */ { 70, 0x0000 }, /* R70 - Write Sequencer 0 */ { 71, 0x0000 }, /* R71 - Write Sequencer 1 */ { 72, 0x0000 }, /* R72 - Write Sequencer 2 */ { 73, 0x0000 }, /* R73 - Write Sequencer 3 */ { 74, 0x0000 }, /* R74 - Write Sequencer 4 */ { 75, 0x0000 }, /* R75 - Write Sequencer 5 */ { 76, 0x1F25 }, /* R76 - Charge Pump 1 */ { 85, 0x054A }, /* R85 - DC Servo 1 */ { 87, 0x0000 }, /* R87 - DC Servo 3 */ { 96, 0x0100 }, /* R96 - Analogue HP 0 */ { 98, 0x8640 }, /* R98 - AGC Control 0 */ { 99, 0xC000 }, /* R99 - AGC Control 1 */ { 100, 0x0200 }, /* R100 - AGC Control 2 */ }; /* This struct is used to save the context */ struct wm9090_priv { struct wm9090_platform_data pdata; struct regmap *regmap; }; static bool wm9090_volatile(struct device *dev, unsigned int reg) { switch (reg) { case WM9090_SOFTWARE_RESET: case WM9090_DC_SERVO_0: case WM9090_DC_SERVO_READBACK_0: case WM9090_DC_SERVO_READBACK_1: case WM9090_DC_SERVO_READBACK_2: return true; default: return false; } } static bool wm9090_readable(struct device *dev, unsigned int reg) { switch (reg) { case WM9090_SOFTWARE_RESET: case WM9090_POWER_MANAGEMENT_1: case WM9090_POWER_MANAGEMENT_2: case WM9090_POWER_MANAGEMENT_3: case WM9090_CLOCKING_1: case WM9090_IN1_LINE_CONTROL: case WM9090_IN2_LINE_CONTROL: case WM9090_IN1_LINE_INPUT_A_VOLUME: case WM9090_IN1_LINE_INPUT_B_VOLUME: case WM9090_IN2_LINE_INPUT_A_VOLUME: case WM9090_IN2_LINE_INPUT_B_VOLUME: case WM9090_LEFT_OUTPUT_VOLUME: case WM9090_RIGHT_OUTPUT_VOLUME: case WM9090_SPKMIXL_ATTENUATION: case WM9090_SPKOUT_MIXERS: case WM9090_CLASSD3: case WM9090_SPEAKER_VOLUME_LEFT: case WM9090_OUTPUT_MIXER1: case WM9090_OUTPUT_MIXER2: case WM9090_OUTPUT_MIXER3: case WM9090_OUTPUT_MIXER4: case WM9090_SPEAKER_MIXER: case WM9090_ANTIPOP2: case WM9090_WRITE_SEQUENCER_0: case WM9090_WRITE_SEQUENCER_1: case WM9090_WRITE_SEQUENCER_2: case WM9090_WRITE_SEQUENCER_3: case 
WM9090_WRITE_SEQUENCER_4: case WM9090_WRITE_SEQUENCER_5: case WM9090_CHARGE_PUMP_1: case WM9090_DC_SERVO_0: case WM9090_DC_SERVO_1: case WM9090_DC_SERVO_3: case WM9090_DC_SERVO_READBACK_0: case WM9090_DC_SERVO_READBACK_1: case WM9090_DC_SERVO_READBACK_2: case WM9090_ANALOGUE_HP_0: case WM9090_AGC_CONTROL_0: case WM9090_AGC_CONTROL_1: case WM9090_AGC_CONTROL_2: return true; default: return false; } } static void wait_for_dc_servo(struct snd_soc_codec *codec) { unsigned int reg; int count = 0; dev_dbg(codec->dev, "Waiting for DC servo...\n"); do { count++; msleep(1); reg = snd_soc_read(codec, WM9090_DC_SERVO_READBACK_0); dev_dbg(codec->dev, "DC servo status: %x\n", reg); } while ((reg & WM9090_DCS_CAL_COMPLETE_MASK) != WM9090_DCS_CAL_COMPLETE_MASK && count < 1000); if ((reg & WM9090_DCS_CAL_COMPLETE_MASK) != WM9090_DCS_CAL_COMPLETE_MASK) dev_err(codec->dev, "Timed out waiting for DC Servo\n"); } static const unsigned int in_tlv[] = { TLV_DB_RANGE_HEAD(3), 0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0), 1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0), 4, 6, TLV_DB_SCALE_ITEM(600, 600, 0), }; static const unsigned int mix_tlv[] = { TLV_DB_RANGE_HEAD(2), 0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0), 3, 3, TLV_DB_SCALE_ITEM(0, 0, 0), }; static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0); static const unsigned int spkboost_tlv[] = { TLV_DB_RANGE_HEAD(2), 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0), 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0), }; static const struct snd_kcontrol_new wm9090_controls[] = { SOC_SINGLE_TLV("IN1A Volume", WM9090_IN1_LINE_INPUT_A_VOLUME, 0, 6, 0, in_tlv), SOC_SINGLE("IN1A Switch", WM9090_IN1_LINE_INPUT_A_VOLUME, 7, 1, 1), SOC_SINGLE("IN1A ZC Switch", WM9090_IN1_LINE_INPUT_A_VOLUME, 6, 1, 0), SOC_SINGLE_TLV("IN2A Volume", WM9090_IN2_LINE_INPUT_A_VOLUME, 0, 6, 0, in_tlv), SOC_SINGLE("IN2A Switch", WM9090_IN2_LINE_INPUT_A_VOLUME, 7, 1, 1), SOC_SINGLE("IN2A ZC Switch", WM9090_IN2_LINE_INPUT_A_VOLUME, 6, 1, 0), SOC_SINGLE("MIXOUTL Switch", WM9090_OUTPUT_MIXER3, 8, 1, 1), 
SOC_SINGLE_TLV("MIXOUTL IN1A Volume", WM9090_OUTPUT_MIXER3, 6, 3, 1, mix_tlv), SOC_SINGLE_TLV("MIXOUTL IN2A Volume", WM9090_OUTPUT_MIXER3, 2, 3, 1, mix_tlv), SOC_SINGLE("MIXOUTR Switch", WM9090_OUTPUT_MIXER4, 8, 1, 1), SOC_SINGLE_TLV("MIXOUTR IN1A Volume", WM9090_OUTPUT_MIXER4, 6, 3, 1, mix_tlv), SOC_SINGLE_TLV("MIXOUTR IN2A Volume", WM9090_OUTPUT_MIXER4, 2, 3, 1, mix_tlv), SOC_SINGLE("SPKMIX Switch", WM9090_SPKMIXL_ATTENUATION, 8, 1, 1), SOC_SINGLE_TLV("SPKMIX IN1A Volume", WM9090_SPKMIXL_ATTENUATION, 6, 3, 1, mix_tlv), SOC_SINGLE_TLV("SPKMIX IN2A Volume", WM9090_SPKMIXL_ATTENUATION, 2, 3, 1, mix_tlv), SOC_DOUBLE_R_TLV("Headphone Volume", WM9090_LEFT_OUTPUT_VOLUME, WM9090_RIGHT_OUTPUT_VOLUME, 0, 63, 0, out_tlv), SOC_DOUBLE_R("Headphone Switch", WM9090_LEFT_OUTPUT_VOLUME, WM9090_RIGHT_OUTPUT_VOLUME, 6, 1, 1), SOC_DOUBLE_R("Headphone ZC Switch", WM9090_LEFT_OUTPUT_VOLUME, WM9090_RIGHT_OUTPUT_VOLUME, 7, 1, 0), SOC_SINGLE_TLV("Speaker Volume", WM9090_SPEAKER_VOLUME_LEFT, 0, 63, 0, out_tlv), SOC_SINGLE("Speaker Switch", WM9090_SPEAKER_VOLUME_LEFT, 6, 1, 1), SOC_SINGLE("Speaker ZC Switch", WM9090_SPEAKER_VOLUME_LEFT, 7, 1, 0), SOC_SINGLE_TLV("Speaker Boost Volume", WM9090_CLASSD3, 3, 7, 0, spkboost_tlv), }; static const struct snd_kcontrol_new wm9090_in1_se_controls[] = { SOC_SINGLE_TLV("IN1B Volume", WM9090_IN1_LINE_INPUT_B_VOLUME, 0, 6, 0, in_tlv), SOC_SINGLE("IN1B Switch", WM9090_IN1_LINE_INPUT_B_VOLUME, 7, 1, 1), SOC_SINGLE("IN1B ZC Switch", WM9090_IN1_LINE_INPUT_B_VOLUME, 6, 1, 0), SOC_SINGLE_TLV("SPKMIX IN1B Volume", WM9090_SPKMIXL_ATTENUATION, 4, 3, 1, mix_tlv), SOC_SINGLE_TLV("MIXOUTL IN1B Volume", WM9090_OUTPUT_MIXER3, 4, 3, 1, mix_tlv), SOC_SINGLE_TLV("MIXOUTR IN1B Volume", WM9090_OUTPUT_MIXER4, 4, 3, 1, mix_tlv), }; static const struct snd_kcontrol_new wm9090_in2_se_controls[] = { SOC_SINGLE_TLV("IN2B Volume", WM9090_IN2_LINE_INPUT_B_VOLUME, 0, 6, 0, in_tlv), SOC_SINGLE("IN2B Switch", WM9090_IN2_LINE_INPUT_B_VOLUME, 7, 1, 1), SOC_SINGLE("IN2B ZC Switch", 
WM9090_IN2_LINE_INPUT_B_VOLUME, 6, 1, 0), SOC_SINGLE_TLV("SPKMIX IN2B Volume", WM9090_SPKMIXL_ATTENUATION, 0, 3, 1, mix_tlv), SOC_SINGLE_TLV("MIXOUTL IN2B Volume", WM9090_OUTPUT_MIXER3, 0, 3, 1, mix_tlv), SOC_SINGLE_TLV("MIXOUTR IN2B Volume", WM9090_OUTPUT_MIXER4, 0, 3, 1, mix_tlv), }; static int hp_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; unsigned int reg = snd_soc_read(codec, WM9090_ANALOGUE_HP_0); switch (event) { case SND_SOC_DAPM_POST_PMU: snd_soc_update_bits(codec, WM9090_CHARGE_PUMP_1, WM9090_CP_ENA, WM9090_CP_ENA); msleep(5); snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_1, WM9090_HPOUT1L_ENA | WM9090_HPOUT1R_ENA, WM9090_HPOUT1L_ENA | WM9090_HPOUT1R_ENA); reg |= WM9090_HPOUT1L_DLY | WM9090_HPOUT1R_DLY; snd_soc_write(codec, WM9090_ANALOGUE_HP_0, reg); /* Start the DC servo. We don't currently use the * ability to save the state since we don't have full * control of the analogue paths and they can change * DC offsets; see the WM8904 driver for an example of * doing so. 
*/ snd_soc_write(codec, WM9090_DC_SERVO_0, WM9090_DCS_ENA_CHAN_0 | WM9090_DCS_ENA_CHAN_1 | WM9090_DCS_TRIG_STARTUP_1 | WM9090_DCS_TRIG_STARTUP_0); wait_for_dc_servo(codec); reg |= WM9090_HPOUT1R_OUTP | WM9090_HPOUT1R_RMV_SHORT | WM9090_HPOUT1L_OUTP | WM9090_HPOUT1L_RMV_SHORT; snd_soc_write(codec, WM9090_ANALOGUE_HP_0, reg); break; case SND_SOC_DAPM_PRE_PMD: reg &= ~(WM9090_HPOUT1L_RMV_SHORT | WM9090_HPOUT1L_DLY | WM9090_HPOUT1L_OUTP | WM9090_HPOUT1R_RMV_SHORT | WM9090_HPOUT1R_DLY | WM9090_HPOUT1R_OUTP); snd_soc_write(codec, WM9090_ANALOGUE_HP_0, reg); snd_soc_write(codec, WM9090_DC_SERVO_0, 0); snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_1, WM9090_HPOUT1L_ENA | WM9090_HPOUT1R_ENA, 0); snd_soc_update_bits(codec, WM9090_CHARGE_PUMP_1, WM9090_CP_ENA, 0); break; } return 0; } static const struct snd_kcontrol_new spkmix[] = { SOC_DAPM_SINGLE("IN1A Switch", WM9090_SPEAKER_MIXER, 6, 1, 0), SOC_DAPM_SINGLE("IN1B Switch", WM9090_SPEAKER_MIXER, 4, 1, 0), SOC_DAPM_SINGLE("IN2A Switch", WM9090_SPEAKER_MIXER, 2, 1, 0), SOC_DAPM_SINGLE("IN2B Switch", WM9090_SPEAKER_MIXER, 0, 1, 0), }; static const struct snd_kcontrol_new spkout[] = { SOC_DAPM_SINGLE("Mixer Switch", WM9090_SPKOUT_MIXERS, 4, 1, 0), }; static const struct snd_kcontrol_new mixoutl[] = { SOC_DAPM_SINGLE("IN1A Switch", WM9090_OUTPUT_MIXER1, 6, 1, 0), SOC_DAPM_SINGLE("IN1B Switch", WM9090_OUTPUT_MIXER1, 4, 1, 0), SOC_DAPM_SINGLE("IN2A Switch", WM9090_OUTPUT_MIXER1, 2, 1, 0), SOC_DAPM_SINGLE("IN2B Switch", WM9090_OUTPUT_MIXER1, 0, 1, 0), }; static const struct snd_kcontrol_new mixoutr[] = { SOC_DAPM_SINGLE("IN1A Switch", WM9090_OUTPUT_MIXER2, 6, 1, 0), SOC_DAPM_SINGLE("IN1B Switch", WM9090_OUTPUT_MIXER2, 4, 1, 0), SOC_DAPM_SINGLE("IN2A Switch", WM9090_OUTPUT_MIXER2, 2, 1, 0), SOC_DAPM_SINGLE("IN2B Switch", WM9090_OUTPUT_MIXER2, 0, 1, 0), }; static const struct snd_soc_dapm_widget wm9090_dapm_widgets[] = { SND_SOC_DAPM_INPUT("IN1+"), SND_SOC_DAPM_INPUT("IN1-"), SND_SOC_DAPM_INPUT("IN2+"), 
SND_SOC_DAPM_INPUT("IN2-"), SND_SOC_DAPM_SUPPLY("OSC", WM9090_POWER_MANAGEMENT_1, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("IN1A PGA", WM9090_POWER_MANAGEMENT_2, 7, 0, NULL, 0), SND_SOC_DAPM_PGA("IN1B PGA", WM9090_POWER_MANAGEMENT_2, 6, 0, NULL, 0), SND_SOC_DAPM_PGA("IN2A PGA", WM9090_POWER_MANAGEMENT_2, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("IN2B PGA", WM9090_POWER_MANAGEMENT_2, 4, 0, NULL, 0), SND_SOC_DAPM_MIXER("SPKMIX", WM9090_POWER_MANAGEMENT_3, 3, 0, spkmix, ARRAY_SIZE(spkmix)), SND_SOC_DAPM_MIXER("MIXOUTL", WM9090_POWER_MANAGEMENT_3, 5, 0, mixoutl, ARRAY_SIZE(mixoutl)), SND_SOC_DAPM_MIXER("MIXOUTR", WM9090_POWER_MANAGEMENT_3, 4, 0, mixoutr, ARRAY_SIZE(mixoutr)), SND_SOC_DAPM_PGA_E("HP PGA", SND_SOC_NOPM, 0, 0, NULL, 0, hp_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_PGA("SPKPGA", WM9090_POWER_MANAGEMENT_3, 8, 0, NULL, 0), SND_SOC_DAPM_MIXER("SPKOUT", WM9090_POWER_MANAGEMENT_1, 12, 0, spkout, ARRAY_SIZE(spkout)), SND_SOC_DAPM_OUTPUT("HPR"), SND_SOC_DAPM_OUTPUT("HPL"), SND_SOC_DAPM_OUTPUT("Speaker"), }; static const struct snd_soc_dapm_route audio_map[] = { { "IN1A PGA", NULL, "IN1+" }, { "IN2A PGA", NULL, "IN2+" }, { "SPKMIX", "IN1A Switch", "IN1A PGA" }, { "SPKMIX", "IN2A Switch", "IN2A PGA" }, { "MIXOUTL", "IN1A Switch", "IN1A PGA" }, { "MIXOUTL", "IN2A Switch", "IN2A PGA" }, { "MIXOUTR", "IN1A Switch", "IN1A PGA" }, { "MIXOUTR", "IN2A Switch", "IN2A PGA" }, { "HP PGA", NULL, "OSC" }, { "HP PGA", NULL, "MIXOUTL" }, { "HP PGA", NULL, "MIXOUTR" }, { "HPL", NULL, "HP PGA" }, { "HPR", NULL, "HP PGA" }, { "SPKPGA", NULL, "OSC" }, { "SPKPGA", NULL, "SPKMIX" }, { "SPKOUT", "Mixer Switch", "SPKPGA" }, { "Speaker", NULL, "SPKOUT" }, }; static const struct snd_soc_dapm_route audio_map_in1_se[] = { { "IN1B PGA", NULL, "IN1-" }, { "SPKMIX", "IN1B Switch", "IN1B PGA" }, { "MIXOUTL", "IN1B Switch", "IN1B PGA" }, { "MIXOUTR", "IN1B Switch", "IN1B PGA" }, }; static const struct snd_soc_dapm_route audio_map_in1_diff[] = { { "IN1A PGA", NULL, "IN1-" }, }; static const 
struct snd_soc_dapm_route audio_map_in2_se[] = { { "IN2B PGA", NULL, "IN2-" }, { "SPKMIX", "IN2B Switch", "IN2B PGA" }, { "MIXOUTL", "IN2B Switch", "IN2B PGA" }, { "MIXOUTR", "IN2B Switch", "IN2B PGA" }, }; static const struct snd_soc_dapm_route audio_map_in2_diff[] = { { "IN2A PGA", NULL, "IN2-" }, }; static int wm9090_add_controls(struct snd_soc_codec *codec) { struct wm9090_priv *wm9090 = snd_soc_codec_get_drvdata(codec); struct snd_soc_dapm_context *dapm = &codec->dapm; int i; snd_soc_dapm_new_controls(dapm, wm9090_dapm_widgets, ARRAY_SIZE(wm9090_dapm_widgets)); snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map)); snd_soc_add_codec_controls(codec, wm9090_controls, ARRAY_SIZE(wm9090_controls)); if (wm9090->pdata.lin1_diff) { snd_soc_dapm_add_routes(dapm, audio_map_in1_diff, ARRAY_SIZE(audio_map_in1_diff)); } else { snd_soc_dapm_add_routes(dapm, audio_map_in1_se, ARRAY_SIZE(audio_map_in1_se)); snd_soc_add_codec_controls(codec, wm9090_in1_se_controls, ARRAY_SIZE(wm9090_in1_se_controls)); } if (wm9090->pdata.lin2_diff) { snd_soc_dapm_add_routes(dapm, audio_map_in2_diff, ARRAY_SIZE(audio_map_in2_diff)); } else { snd_soc_dapm_add_routes(dapm, audio_map_in2_se, ARRAY_SIZE(audio_map_in2_se)); snd_soc_add_codec_controls(codec, wm9090_in2_se_controls, ARRAY_SIZE(wm9090_in2_se_controls)); } if (wm9090->pdata.agc_ena) { for (i = 0; i < ARRAY_SIZE(wm9090->pdata.agc); i++) snd_soc_write(codec, WM9090_AGC_CONTROL_0 + i, wm9090->pdata.agc[i]); snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_3, WM9090_AGC_ENA, WM9090_AGC_ENA); } else { snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_3, WM9090_AGC_ENA, 0); } return 0; } /* * The machine driver should call this from their set_bias_level; if there * isn't one then this can just be set as the set_bias_level function. 
*/ static int wm9090_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm9090_priv *wm9090 = snd_soc_codec_get_drvdata(codec); switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: snd_soc_update_bits(codec, WM9090_ANTIPOP2, WM9090_VMID_ENA, WM9090_VMID_ENA); snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_1, WM9090_BIAS_ENA | WM9090_VMID_RES_MASK, WM9090_BIAS_ENA | 1 << WM9090_VMID_RES_SHIFT); msleep(1); /* Probably an overestimate */ break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { /* Restore the register cache */ regcache_sync(wm9090->regmap); } /* We keep VMID off during standby since the combination of * ground referenced outputs and class D speaker mean that * latency is not an issue. */ snd_soc_update_bits(codec, WM9090_POWER_MANAGEMENT_1, WM9090_BIAS_ENA | WM9090_VMID_RES_MASK, 0); snd_soc_update_bits(codec, WM9090_ANTIPOP2, WM9090_VMID_ENA, 0); break; case SND_SOC_BIAS_OFF: break; } codec->dapm.bias_level = level; return 0; } static int wm9090_probe(struct snd_soc_codec *codec) { struct wm9090_priv *wm9090 = dev_get_drvdata(codec->dev); int ret; codec->control_data = wm9090->regmap; ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_REGMAP); if (ret != 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } /* Configure some defaults; they will be written out when we * bring the bias up. 
*/ snd_soc_update_bits(codec, WM9090_IN1_LINE_INPUT_A_VOLUME, WM9090_IN1_VU | WM9090_IN1A_ZC, WM9090_IN1_VU | WM9090_IN1A_ZC); snd_soc_update_bits(codec, WM9090_IN1_LINE_INPUT_B_VOLUME, WM9090_IN1_VU | WM9090_IN1B_ZC, WM9090_IN1_VU | WM9090_IN1B_ZC); snd_soc_update_bits(codec, WM9090_IN2_LINE_INPUT_A_VOLUME, WM9090_IN2_VU | WM9090_IN2A_ZC, WM9090_IN2_VU | WM9090_IN2A_ZC); snd_soc_update_bits(codec, WM9090_IN2_LINE_INPUT_B_VOLUME, WM9090_IN2_VU | WM9090_IN2B_ZC, WM9090_IN2_VU | WM9090_IN2B_ZC); snd_soc_update_bits(codec, WM9090_SPEAKER_VOLUME_LEFT, WM9090_SPKOUT_VU | WM9090_SPKOUTL_ZC, WM9090_SPKOUT_VU | WM9090_SPKOUTL_ZC); snd_soc_update_bits(codec, WM9090_LEFT_OUTPUT_VOLUME, WM9090_HPOUT1_VU | WM9090_HPOUT1L_ZC, WM9090_HPOUT1_VU | WM9090_HPOUT1L_ZC); snd_soc_update_bits(codec, WM9090_RIGHT_OUTPUT_VOLUME, WM9090_HPOUT1_VU | WM9090_HPOUT1R_ZC, WM9090_HPOUT1_VU | WM9090_HPOUT1R_ZC); snd_soc_update_bits(codec, WM9090_CLOCKING_1, WM9090_TOCLK_ENA, WM9090_TOCLK_ENA); wm9090_set_bias_level(codec, SND_SOC_BIAS_STANDBY); wm9090_add_controls(codec); return 0; } #ifdef CONFIG_PM static int wm9090_suspend(struct snd_soc_codec *codec) { wm9090_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm9090_resume(struct snd_soc_codec *codec) { wm9090_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } #else #define wm9090_suspend NULL #define wm9090_resume NULL #endif static int wm9090_remove(struct snd_soc_codec *codec) { wm9090_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm9090 = { .probe = wm9090_probe, .remove = wm9090_remove, .suspend = wm9090_suspend, .resume = wm9090_resume, .set_bias_level = wm9090_set_bias_level, }; static const struct regmap_config wm9090_regmap = { .reg_bits = 8, .val_bits = 16, .max_register = WM9090_MAX_REGISTER, .volatile_reg = wm9090_volatile, .readable_reg = wm9090_readable, .cache_type = REGCACHE_RBTREE, .reg_defaults = wm9090_reg_defaults, .num_reg_defaults = 
ARRAY_SIZE(wm9090_reg_defaults), }; static int wm9090_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm9090_priv *wm9090; unsigned int reg; int ret; wm9090 = devm_kzalloc(&i2c->dev, sizeof(*wm9090), GFP_KERNEL); if (wm9090 == NULL) { dev_err(&i2c->dev, "Can not allocate memory\n"); return -ENOMEM; } wm9090->regmap = devm_regmap_init_i2c(i2c, &wm9090_regmap); if (IS_ERR(wm9090->regmap)) { ret = PTR_ERR(wm9090->regmap); dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret); return ret; } ret = regmap_read(wm9090->regmap, WM9090_SOFTWARE_RESET, &reg); if (ret < 0) return ret; if (reg != 0x9093) { dev_err(&i2c->dev, "Device is not a WM9090, ID=%x\n", reg); return -ENODEV; } ret = regmap_write(wm9090->regmap, WM9090_SOFTWARE_RESET, 0); if (ret < 0) return ret; if (i2c->dev.platform_data) memcpy(&wm9090->pdata, i2c->dev.platform_data, sizeof(wm9090->pdata)); i2c_set_clientdata(i2c, wm9090); ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm9090, NULL, 0); if (ret != 0) { dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret); return ret; } return 0; } static int wm9090_i2c_remove(struct i2c_client *i2c) { snd_soc_unregister_codec(&i2c->dev); return 0; } static const struct i2c_device_id wm9090_id[] = { { "wm9090", 0 }, { "wm9093", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm9090_id); static struct i2c_driver wm9090_i2c_driver = { .driver = { .name = "wm9090", .owner = THIS_MODULE, }, .probe = wm9090_i2c_probe, .remove = wm9090_i2c_remove, .id_table = wm9090_id, }; module_i2c_driver(wm9090_i2c_driver); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("WM9090 ASoC driver"); MODULE_LICENSE("GPL");
gpl-2.0
ezeeyahoo/android_kernel_motorola_msm8916
arch/arm/mach-omap1/reset.c
3234
1670
/* * OMAP1 reset support */ #include <linux/kernel.h> #include <linux/io.h> #include <linux/reboot.h> #include <mach/hardware.h> #include "iomap.h" #include "common.h" /* ARM_SYSST bit shifts related to SoC reset sources */ #define ARM_SYSST_POR_SHIFT 5 #define ARM_SYSST_EXT_RST_SHIFT 4 #define ARM_SYSST_ARM_WDRST_SHIFT 2 #define ARM_SYSST_GLOB_SWRST_SHIFT 1 /* Standardized reset source bits (across all OMAP SoCs) */ #define OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT 0 #define OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT 1 #define OMAP_MPU_WD_RST_SRC_ID_SHIFT 3 #define OMAP_EXTWARM_RST_SRC_ID_SHIFT 5 void omap1_restart(enum reboot_mode mode, const char *cmd) { /* * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28 * "Global Software Reset Affects Traffic Controller Frequency". */ if (cpu_is_omap5912()) { omap_writew(omap_readw(DPLL_CTL) & ~(1 << 4), DPLL_CTL); omap_writew(0x8, ARM_RSTCT1); } omap_writew(1, ARM_RSTCT1); } /** * omap1_get_reset_sources - return the source of the SoC's last reset * * Returns bits that represent the last reset source for the SoC. The * format is standardized across OMAPs for use by the OMAP watchdog. */ u32 omap1_get_reset_sources(void) { u32 ret = 0; u16 rs; rs = __raw_readw(OMAP1_IO_ADDRESS(ARM_SYSST)); if (rs & (1 << ARM_SYSST_POR_SHIFT)) ret |= 1 << OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT; if (rs & (1 << ARM_SYSST_EXT_RST_SHIFT)) ret |= 1 << OMAP_EXTWARM_RST_SRC_ID_SHIFT; if (rs & (1 << ARM_SYSST_ARM_WDRST_SHIFT)) ret |= 1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT; if (rs & (1 << ARM_SYSST_GLOB_SWRST_SHIFT)) ret |= 1 << OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT; return ret; }
gpl-2.0
namko/MID-Kernel-3.0
drivers/net/wireless/orinoco/airport.c
3490
6249
/* airport.c * * A driver for "Hermes" chipset based Apple Airport wireless * card. * * Copyright notice & release notes in file main.c * * Note specific to airport stub: * * 0.05 : first version of the new split driver * 0.06 : fix possible hang on powerup, add sleep support */ #define DRIVER_NAME "airport" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/pmac_feature.h> #include "orinoco.h" #define AIRPORT_IO_LEN (0x1000) /* one page */ struct airport { struct macio_dev *mdev; void __iomem *vaddr; unsigned int irq; int irq_requested; int ndev_registered; }; static int airport_suspend(struct macio_dev *mdev, pm_message_t state) { struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev); struct net_device *dev = priv->ndev; struct airport *card = priv->card; unsigned long flags; int err; printk(KERN_DEBUG "%s: Airport entering sleep mode\n", dev->name); err = orinoco_lock(priv, &flags); if (err) { printk(KERN_ERR "%s: hw_unavailable on PBOOK_SLEEP_NOW\n", dev->name); return 0; } orinoco_down(priv); orinoco_unlock(priv, &flags); disable_irq(card->irq); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 0); return 0; } static int airport_resume(struct macio_dev *mdev) { struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev); struct net_device *dev = priv->ndev; struct airport *card = priv->card; unsigned long flags; int err; printk(KERN_DEBUG "%s: Airport waking up\n", dev->name); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 1); msleep(200); enable_irq(card->irq); priv->hw.ops->lock_irqsave(&priv->lock, &flags); err = orinoco_up(priv); priv->hw.ops->unlock_irqrestore(&priv->lock, &flags); return err; } static int airport_detach(struct macio_dev *mdev) { struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev); struct airport *card = priv->card; if (card->ndev_registered) orinoco_if_del(priv); 
card->ndev_registered = 0; if (card->irq_requested) free_irq(card->irq, priv); card->irq_requested = 0; if (card->vaddr) iounmap(card->vaddr); card->vaddr = NULL; macio_release_resource(mdev, 0); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 0); ssleep(1); macio_set_drvdata(mdev, NULL); free_orinocodev(priv); return 0; } static int airport_hard_reset(struct orinoco_private *priv) { /* It would be nice to power cycle the Airport for a real hard * reset, but for some reason although it appears to * re-initialize properly, it falls in a screaming heap * shortly afterwards. */ #if 0 struct airport *card = priv->card; /* Vitally important. If we don't do this it seems we get an * interrupt somewhere during the power cycle, since * hw_unavailable is already set it doesn't get ACKed, we get * into an interrupt loop and the PMU decides to turn us * off. */ disable_irq(card->irq); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(card->mdev), 0, 0); ssleep(1); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(card->mdev), 0, 1); ssleep(1); enable_irq(card->irq); ssleep(1); #endif return 0; } static int airport_attach(struct macio_dev *mdev, const struct of_device_id *match) { struct orinoco_private *priv; struct airport *card; unsigned long phys_addr; hermes_t *hw; if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1) { printk(KERN_ERR PFX "Wrong interrupt/addresses in OF tree\n"); return -ENODEV; } /* Allocate space for private device-specific data */ priv = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev, airport_hard_reset, NULL); if (!priv) { printk(KERN_ERR PFX "Cannot allocate network device\n"); return -ENODEV; } card = priv->card; hw = &priv->hw; card->mdev = mdev; if (macio_request_resource(mdev, 0, DRIVER_NAME)) { printk(KERN_ERR PFX "can't request IO resource !\n"); free_orinocodev(priv); return -EBUSY; } macio_set_drvdata(mdev, priv); /* Setup interrupts & base address */ card->irq = macio_irq(mdev, 0); 
phys_addr = macio_resource_start(mdev, 0); /* Physical address */ printk(KERN_DEBUG PFX "Physical address %lx\n", phys_addr); card->vaddr = ioremap(phys_addr, AIRPORT_IO_LEN); if (!card->vaddr) { printk(KERN_ERR PFX "ioremap() failed\n"); goto failed; } hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING); /* Power up card */ pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 1); ssleep(1); /* Reset it before we get the interrupt */ hw->ops->init(hw); if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) { printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq); goto failed; } card->irq_requested = 1; /* Initialise the main driver */ if (orinoco_init(priv) != 0) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto failed; } /* Register an interface with the stack */ if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto failed; } card->ndev_registered = 1; return 0; failed: airport_detach(mdev); return -ENODEV; } /* airport_attach */ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION " (Benjamin Herrenschmidt <benh@kernel.crashing.org>)"; MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("Driver for the Apple Airport wireless card."); MODULE_LICENSE("Dual MPL/GPL"); static struct of_device_id airport_match[] = { { .name = "radio", }, {}, }; MODULE_DEVICE_TABLE(of, airport_match); static struct macio_driver airport_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = airport_match, }, .probe = airport_attach, .remove = airport_detach, .suspend = airport_suspend, .resume = airport_resume, }; static int __init init_airport(void) { printk(KERN_DEBUG "%s\n", version); return macio_register_driver(&airport_driver); } static void __exit exit_airport(void) { macio_unregister_driver(&airport_driver); } module_init(init_airport); module_exit(exit_airport);
gpl-2.0
f1vefour/mako
drivers/hid/uhid.c
3490
12392
/* * User-space I/O driver support for HID subsystem * Copyright (c) 2012 David Herrmann */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/atomic.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/hid.h> #include <linux/input.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/uhid.h> #include <linux/wait.h> #define UHID_NAME "uhid" #define UHID_BUFSIZE 32 struct uhid_device { struct mutex devlock; bool running; __u8 *rd_data; uint rd_size; struct hid_device *hid; struct uhid_event input_buf; wait_queue_head_t waitq; spinlock_t qlock; __u8 head; __u8 tail; struct uhid_event *outq[UHID_BUFSIZE]; struct mutex report_lock; wait_queue_head_t report_wait; atomic_t report_done; atomic_t report_id; struct uhid_event report_buf; }; static struct miscdevice uhid_misc; static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) { __u8 newhead; newhead = (uhid->head + 1) % UHID_BUFSIZE; if (newhead != uhid->tail) { uhid->outq[uhid->head] = ev; uhid->head = newhead; wake_up_interruptible(&uhid->waitq); } else { hid_warn(uhid->hid, "Output queue is full\n"); kfree(ev); } } static int uhid_queue_event(struct uhid_device *uhid, __u32 event) { unsigned long flags; struct uhid_event *ev; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = event; spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return 0; } static int uhid_hid_start(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; return uhid_queue_event(uhid, UHID_START); } static void uhid_hid_stop(struct hid_device *hid) { struct uhid_device *uhid = 
hid->driver_data; hid->claimed = 0; uhid_queue_event(uhid, UHID_STOP); } static int uhid_hid_open(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; return uhid_queue_event(uhid, UHID_OPEN); } static void uhid_hid_close(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; uhid_queue_event(uhid, UHID_CLOSE); } static int uhid_hid_input(struct input_dev *input, unsigned int type, unsigned int code, int value) { struct hid_device *hid = input_get_drvdata(input); struct uhid_device *uhid = hid->driver_data; unsigned long flags; struct uhid_event *ev; ev = kzalloc(sizeof(*ev), GFP_ATOMIC); if (!ev) return -ENOMEM; ev->type = UHID_OUTPUT_EV; ev->u.output_ev.type = type; ev->u.output_ev.code = code; ev->u.output_ev.value = value; spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return 0; } static int uhid_hid_parse(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; return hid_parse_report(hid, uhid->rd_data, uhid->rd_size); } static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum, __u8 *buf, size_t count, unsigned char rtype) { struct uhid_device *uhid = hid->driver_data; __u8 report_type; struct uhid_event *ev; unsigned long flags; int ret; size_t uninitialized_var(len); struct uhid_feature_answer_req *req; if (!uhid->running) return -EIO; switch (rtype) { case HID_FEATURE_REPORT: report_type = UHID_FEATURE_REPORT; break; case HID_OUTPUT_REPORT: report_type = UHID_OUTPUT_REPORT; break; case HID_INPUT_REPORT: report_type = UHID_INPUT_REPORT; break; default: return -EINVAL; } ret = mutex_lock_interruptible(&uhid->report_lock); if (ret) return ret; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) { ret = -ENOMEM; goto unlock; } spin_lock_irqsave(&uhid->qlock, flags); ev->type = UHID_FEATURE; ev->u.feature.id = atomic_inc_return(&uhid->report_id); ev->u.feature.rnum = rnum; ev->u.feature.rtype = report_type; atomic_set(&uhid->report_done, 0); 
/*
 * NOTE(review): this chunk begins INSIDE uhid_hid_get_raw() — the function's
 * head (taking report_lock, allocating/filling the UHID_FEATURE request event
 * "ev", taking qlock) lies before this view. The code below queues the
 * request, waits for user-space to answer it, and copies the answer out.
 */
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	/* Wait up to 5 seconds for a matching UHID_FEATURE_ANSWER. */
	ret = wait_event_interruptible_timeout(uhid->report_wait,
				atomic_read(&uhid->report_done), 5 * HZ);

	/*
	 * Make sure "uhid->running" is cleared on shutdown before
	 * "uhid->report_done" is set.
	 */
	smp_rmb();
	if (!ret || !uhid->running) {
		/* timed out, or the device was destroyed while we slept */
		ret = -EIO;
	} else if (ret < 0) {
		/* interrupted by a signal */
		ret = -ERESTARTSYS;
	} else {
		/* An answer arrived; copy it out under qlock. */
		spin_lock_irqsave(&uhid->qlock, flags);
		req = &uhid->report_buf.u.feature_answer;

		if (req->err) {
			ret = -EIO;
		} else {
			ret = 0;
			/* Clamp to the caller's buffer AND the protocol max. */
			len = min(count,
				min_t(size_t, req->size, UHID_DATA_MAX));
			memcpy(buf, req->data, len);
		}

		spin_unlock_irqrestore(&uhid->qlock, flags);
	}

	/* Mark the slot free so the next request may use report_buf. */
	atomic_set(&uhid->report_done, 1);

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret ? ret : len;
}

/*
 * uhid_hid_output_raw() - forward an output/feature report from the HID core
 * to user-space by queueing a UHID_OUTPUT event on the device's outq.
 *
 * Returns @count on success, -EINVAL for an unsupported report type or a
 * size outside [1, UHID_DATA_MAX], -ENOMEM on allocation failure. The event
 * is queued fire-and-forget; no answer from user-space is awaited.
 */
static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	/* Translate HID-core report types into UHID wire constants. */
	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	/* uhid_queue() takes ownership of "ev" (freed by reader/release). */
	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

/*
 * Low-level transport driver the HID core uses to talk to this virtual
 * device; the callbacks are defined earlier in this file (outside this view).
 */
static struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.hidinput_input_event = uhid_hid_input,
	.parse = uhid_hid_parse,
};

/*
 * uhid_dev_create() - handle a UHID_CREATE event: copy in the report
 * descriptor, allocate and populate a hid_device, and register it.
 *
 * Returns 0 on success; -EALREADY if a device is already running on this fd,
 * -EINVAL for a bad descriptor size, -ENOMEM/-EFAULT on copy failures, or
 * the error from hid_allocate_device()/hid_add_device().
 *
 * NOTE(review): ev->u.create.rd_data is a user pointer embedded in the event,
 * hence the copy_from_user() here even though "ev" itself was already copied
 * into kernel space by the caller.
 */
static int uhid_dev_create(struct uhid_device *uhid,
			   const struct uhid_event *ev)
{
	struct hid_device *hid;
	int ret;

	if (uhid->running)
		return -EALREADY;

	uhid->rd_size = ev->u.create.rd_size;
	if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	uhid->rd_data = kmalloc(uhid->rd_size, GFP_KERNEL);
	if (!uhid->rd_data)
		return -ENOMEM;

	if (copy_from_user(uhid->rd_data, ev->u.create.rd_data,
			   uhid->rd_size)) {
		ret = -EFAULT;
		goto err_free;
	}

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	/*
	 * NOTE(review): the 127/63 bounds presumably equal
	 * sizeof(hid->name)-1 / sizeof(hid->phys)-1 / sizeof(hid->uniq)-1;
	 * the explicit NUL store covers strncpy()'s no-termination case —
	 * confirm the sizes against struct hid_device.
	 */
	strncpy(hid->name, ev->u.create.name, 127);
	hid->name[127] = 0;
	strncpy(hid->phys, ev->u.create.phys, 63);
	hid->phys[63] = 0;
	strncpy(hid->uniq, ev->u.create.uniq, 63);
	hid->uniq[63] = 0;

	hid->ll_driver = &uhid_hid_driver;
	hid->hid_get_raw_report = uhid_hid_get_raw;
	hid->hid_output_raw_report = uhid_hid_output_raw;
	hid->bus = ev->u.create.bus;
	hid->vendor = ev->u.create.vendor;
	hid->product = ev->u.create.product;
	hid->version = ev->u.create.version;
	hid->country = ev->u.create.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	/* "running" must be true before hid_add_device() probes drivers. */
	uhid->hid = hid;
	uhid->running = true;

	ret = hid_add_device(hid);
	if (ret) {
		hid_err(hid, "Cannot register HID device\n");
		goto err_hid;
	}

	return 0;

err_hid:
	hid_destroy_device(hid);
	uhid->hid = NULL;
	uhid->running = false;
err_free:
	kfree(uhid->rd_data);
	return ret;
}

/*
 * uhid_dev_destroy() - handle UHID_DESTROY (also called from release):
 * stop the device, wake any sleeper in uhid_hid_get_raw(), and free the
 * report descriptor. Returns -EINVAL if no device is running.
 */
static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->running)
		return -EINVAL;

	/* clear "running" before setting "report_done" */
	uhid->running = false;
	smp_wmb();
	atomic_set(&uhid->report_done, 1);
	wake_up_interruptible(&uhid->report_wait);

	hid_destroy_device(uhid->hid);
	kfree(uhid->rd_data);

	return 0;
}

/*
 * uhid_dev_input() - handle UHID_INPUT: inject an input report from
 * user-space into the HID core, clamped to UHID_DATA_MAX bytes.
 */
static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

/*
 * uhid_dev_feature_answer() - handle UHID_FEATURE_ANSWER: if the answer's id
 * matches the currently pending request and no answer was stored yet, stash
 * the event in report_buf and wake the waiter in uhid_hid_get_raw().
 * Stale or duplicate answers are dropped silently (still returns 0).
 */
static int uhid_dev_feature_answer(struct uhid_device *uhid,
				   struct uhid_event *ev)
{
	unsigned long flags;

	if (!uhid->running)
		return -EINVAL;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
		goto unlock;
	if (atomic_read(&uhid->report_done))
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	atomic_set(&uhid->report_done, 1);
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
	return 0;
}

/*
 * uhid_char_open() - allocate per-fd state. Each open of /dev/uhid gets its
 * own uhid_device; no HID device exists until UHID_CREATE is written.
 */
static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	/* report slot starts "done", i.e. free for the first request */
	atomic_set(&uhid->report_done, 1);

	file->private_data = uhid;
	nonseekable_open(inode, file);

	return 0;
}

/*
 * uhid_char_release() - tear down the device (if still running) and free
 * any events left unread in the output queue.
 */
static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

/*
 * uhid_char_read() - deliver the next queued kernel→user event.
 *
 * The head/tail emptiness test outside any lock is only a heuristic; the
 * check is repeated under devlock and, if the queue turned out empty, we
 * loop back to sleep again. Only "tail" advances here (single consumer),
 * under qlock so producers see a consistent ring state.
 */
static ssize_t uhid_char_read(struct file *file, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
						uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		/* lost a race with another reader; go back to sleep */
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}

/*
 * uhid_char_write() - accept one user→kernel uhid_event and dispatch it by
 * type. Short writes are tolerated (the buffer is zeroed first), but at
 * least the 32-bit "type" field must be present.
 */
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));
	if (copy_from_user(&uhid->input_buf, buffer, len)) {
		ret = -EFAULT;
		goto unlock;
	}

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_FEATURE_ANSWER:
		ret = uhid_dev_feature_answer(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

/*
 * uhid_char_poll() - report POLLIN when the kernel→user queue is non-empty.
 * /dev/uhid is always writable, so no POLLOUT handling is needed here.
 */
static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		return POLLIN | POLLRDNORM;

	return 0;
}

/* file_operations for the /dev/uhid character device */
static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

/* misc-device node; minor number is assigned dynamically */
static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= UHID_NAME,
};

/* Module init/exit: just register/unregister the misc device. */
static int __init uhid_init(void)
{
	return misc_register(&uhid_misc);
}

static void __exit uhid_exit(void)
{
	misc_deregister(&uhid_misc);
}

module_init(uhid_init);
module_exit(uhid_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
gpl-2.0