Columns: repo_name (string), path (string), copies (string), size (string), content (string), license (string)
repo_name: ZdrowyGosciu/kernel_lge_d802_v30d
path: drivers/broadcast/tdmb/fc8080/src/fc8080_spi.c
copies: 246
size: 7604
content:
/***************************************************************************** Copyright(c) 2013 FCI Inc. All Rights Reserved File name : fc8080_spi.c Description : spi interface source file This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA History : ---------------------------------------------------------------------- *******************************************************************************/ #include <linux/input.h> #include <linux/spi/spi.h> #include "../inc/broadcast_fc8080.h" #include "../inc/fci_types.h" #include "../inc/fc8080_regs.h" #include "../inc/fci_oal.h" #define SPI_BMODE 0x00 #define SPI_WMODE 0x04 #define SPI_LMODE 0x08 #define SPI_RD_THRESH 0x30 #define SPI_RD_REG 0x20 #define SPI_READ 0x40 #define SPI_WRITE 0x00 #define SPI_AINC 0x80 #define CHIPID 0 #define DRIVER_NAME "fc8080_spi" struct spi_device *fc8080_spi = NULL; #ifdef USE_QCT_DMA_LGE #include <linux/miscdevice.h> static fci_u8 tx_data[10] __cacheline_aligned; static fci_u8 tdata_buf[40] __cacheline_aligned ;//= {0}; static fci_u8 rdata_buf[8196] __cacheline_aligned;// = {0}; #else static fci_u8 tx_data[10]; static fci_u8 tdata_buf[40] = {0}; static fci_u8 rdata_buf[8196] = {0}; #endif static DEFINE_MUTEX(lock); extern struct spi_device *tdmb_fc8080_get_spi_device(void); int fc8080_spi_write_then_read( struct spi_device *spi , fci_u8 *txbuf , fci_u16 tx_length , fci_u8 *rxbuf , fci_u16 rx_length) { fci_s32 res; struct spi_message message; struct spi_transfer x; spi_message_init(&message); memset(&x, 0, sizeof x); spi_message_add_tail(&x, &message); memcpy(tdata_buf, txbuf, tx_length); x.tx_buf = tdata_buf; x.rx_buf = rdata_buf; x.len = tx_length + rx_length; res = spi_sync(spi, &message); memcpy(rxbuf, x.rx_buf + tx_length, rx_length); return res; } int fc8080_spi_write_then_read_burst( struct spi_device *spi , fci_u8 *txbuf , fci_u16 tx_length , fci_u8 *rxbuf , fci_u16 rx_length) { fci_s32 res; struct spi_message message; struct spi_transfer x; spi_message_init(&message); memset(&x, 0, sizeof x); spi_message_add_tail(&x, &message); x.tx_buf = txbuf; x.rx_buf = rxbuf; x.len = tx_length + rx_length; res = spi_sync(spi, &message); return res; } static fci_s32 spi_bulkread(HANDLE handle, fci_u16 addr, fci_u8 command, fci_u8 *data, fci_u16 length) { fci_s32 res = BBM_OK; tx_data[0] = (fci_u8) (addr & 0xff); tx_data[1] = (fci_u8) ((addr >> 8) & 0xff); tx_data[2] = (fci_u8) ((command & 0xfc) | CHIPID); tx_data[3] = (fci_u8) (length & 0xff); res = fc8080_spi_write_then_read( fc8080_spi, &tx_data[0], 4, &data[0], length); if (res) { print_log(0, "fc8080_spi_bulkread fail : %d\n", res); return BBM_NOK; } return BBM_OK; } static fci_s32 spi_bulkwrite(HANDLE handle, fci_u16 addr, fci_u8 command, fci_u8 *data, fci_u16 length) { fci_s32 res = BBM_OK; fci_s32 i = 0; tx_data[0] = (fci_u8) (addr & 0xff); tx_data[1] = (fci_u8) ((addr >> 8) & 0xff); tx_data[2] = (fci_u8) ((command & 0xfc) | CHIPID); tx_data[3] = 
(fci_u8) (length & 0xff); for (i = 0; i < length; i++) tx_data[4+i] = data[i]; res = fc8080_spi_write_then_read( fc8080_spi, &tx_data[0], length+4, NULL, 0); if (res) { print_log(0, "fc8080_spi_bulkwrite fail : %d\n", res); return BBM_NOK; } return BBM_OK; } static fci_s32 spi_dataread(HANDLE handle, fci_u8 addr, fci_u8 command, fci_u8 *data, fci_u32 length) { fci_s32 res = BBM_OK; tx_data[0] = (fci_u8) (addr & 0xff); tx_data[1] = (fci_u8) ((addr >> 8) & 0xff); tx_data[2] = (fci_u8) ((command & 0xfc) | CHIPID); tx_data[3] = (fci_u8) (length & 0xff); if(length > 384) res = fc8080_spi_write_then_read_burst(fc8080_spi , &tx_data[0], 4, &data[0], length); else res = fc8080_spi_write_then_read(fc8080_spi , &tx_data[0], 4, &data[0], length); if (res) { print_log(0, "fc8080_spi_dataread fail : %d\n", res); return BBM_NOK; } return BBM_OK; } int fc8080_spi_init(HANDLE hDevice, fci_u16 param1, fci_u16 param2) { fc8080_spi = tdmb_fc8080_get_spi_device(); if(fc8080_spi == NULL) { printk("spi device is not ready \n"); return BBM_NOK; } return BBM_OK; } fci_s32 fc8080_spi_byteread(HANDLE handle, fci_u16 addr, fci_u8 *data) { fci_s32 res; fci_u8 command = SPI_READ; mutex_lock(&lock); res = spi_bulkread(handle, addr, command, data, 1); mutex_unlock(&lock); return res; } fci_s32 fc8080_spi_wordread(HANDLE handle, fci_u16 addr, fci_u16 *data) { fci_s32 res; fci_u8 command = SPI_READ | SPI_AINC; mutex_lock(&lock); res = spi_bulkread(handle, addr, command, (fci_u8 *) data, 2); mutex_unlock(&lock); return res; } fci_s32 fc8080_spi_longread(HANDLE handle, fci_u16 addr, fci_u32 *data) { fci_s32 res; fci_u8 command = SPI_READ | SPI_AINC; mutex_lock(&lock); res = spi_bulkread(handle, addr, command, (fci_u8 *) data, 4); mutex_unlock(&lock); return res; } fci_s32 fc8080_spi_bulkread(HANDLE handle, fci_u16 addr, fci_u8 *data, fci_u16 length) { fci_s32 i; fci_u16 x, y; fci_s32 res = BBM_OK; fci_u8 command = SPI_READ | SPI_AINC; x = length / 255; y = length % 255; mutex_lock(&lock); for (i = 0; i < x; i++, addr += 255) res |= spi_bulkread(handle, addr, command, &data[i * 255], 255); if (y) res |= spi_bulkread(handle, addr, command, &data[x * 255], y); mutex_unlock(&lock); return res; } fci_s32 fc8080_spi_bytewrite(HANDLE handle, fci_u16 addr, fci_u8 data) { fci_s32 res; fci_u8 command = SPI_WRITE; mutex_lock(&lock); res = spi_bulkwrite(handle, addr, command, (fci_u8 *) &data, 1); mutex_unlock(&lock); return res; } fci_s32 fc8080_spi_wordwrite(HANDLE handle, fci_u16 addr, fci_u16 data) { fci_s32 res; fci_u8 command = SPI_WRITE; if ((addr & 0xff00) != 0x0f00) command |= SPI_AINC; mutex_lock(&lock); res = spi_bulkwrite(handle, addr, command, (fci_u8 *) &data, 2); mutex_unlock(&lock); return res; } fci_s32 fc8080_spi_longwrite(HANDLE handle, fci_u16 addr, fci_u32 data) { fci_s32 res; fci_u8 command = SPI_WRITE | SPI_AINC; mutex_lock(&lock); res = spi_bulkwrite(handle, addr, command, (fci_u8 *) &data, 4); mutex_unlock(&lock); return res; } fci_s32 fc8080_spi_bulkwrite(HANDLE handle, fci_u16 addr, fci_u8 *data, fci_u16 length) { fci_s32 i; fci_u16 x, y; fci_s32 res = BBM_OK; fci_u8 command = SPI_WRITE | SPI_AINC; x = length / 255; y = length % 255; mutex_lock(&lock); for (i = 0; i < x; i++, addr += 255) res |= spi_bulkwrite(handle, addr, command, &data[i * 255], 255); if (y) res |= spi_bulkwrite(handle, addr, command, &data[x * 255], y); mutex_unlock(&lock); return res; } fci_s32 fc8080_spi_dataread(HANDLE handle, fci_u16 addr, fci_u8 *data, fci_u32 length) { fci_s32 res; fci_u8 command = SPI_READ | SPI_RD_THRESH; 
mutex_lock(&lock); res = spi_dataread(handle, addr, command, data, length); mutex_unlock(&lock); return res; } fci_s32 fc8080_spi_deinit(HANDLE handle) { return BBM_OK; }
license: gpl-2.0
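The driver above frames every SPI access with a 4-byte command header (address low byte, address high byte, command ORed with CHIPID, payload length) and splits bulk transfers into 255-byte chunks plus a remainder. Below is a minimal, standalone sketch of that arithmetic; build_fc8080_header() is a hypothetical illustration helper, not part of the driver.

/* Sketch of the fc8080 SPI command header layout and 255-byte bulk chunking.
 * Standalone user-space illustration; build_fc8080_header() is hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define CHIPID   0x00
#define SPI_READ 0x40
#define SPI_AINC 0x80

static void build_fc8080_header(uint8_t hdr[4], uint16_t addr,
                                uint8_t command, uint8_t length)
{
    hdr[0] = addr & 0xff;                /* address, low byte            */
    hdr[1] = (addr >> 8) & 0xff;         /* address, high byte           */
    hdr[2] = (command & 0xfc) | CHIPID;  /* command bits + chip id       */
    hdr[3] = length;                     /* payload length of this chunk */
}

int main(void)
{
    uint8_t hdr[4];
    unsigned length = 600;               /* example bulk read size       */
    unsigned full = length / 255;        /* full 255-byte chunks         */
    unsigned rest = length % 255;        /* trailing partial chunk       */

    build_fc8080_header(hdr, 0x0100, SPI_READ | SPI_AINC, 255);
    printf("header: %02x %02x %02x %02x\n",
           (unsigned)hdr[0], (unsigned)hdr[1],
           (unsigned)hdr[2], (unsigned)hdr[3]);
    printf("%u bytes -> %u full chunks + %u-byte remainder\n",
           length, full, rest);
    return 0;
}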
repo_name: math4youbyusgroupillinois/KVMGT-kernel
path: drivers/media/platform/omap3isp/ispqueue.c
copies: 246
size: 29587
content:
/* * ispqueue.c * * TI OMAP3 ISP - Video buffers queue handling * * Copyright (C) 2010 Nokia Corporation * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <asm/cacheflush.h> #include <linux/dma-mapping.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/poll.h> #include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include "ispqueue.h" /* ----------------------------------------------------------------------------- * Video buffers management */ /* * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP * * The typical operation required here is Cache Invalidation across * the (user space) buffer address range. And this _must_ be done * at QBUF stage (and *only* at QBUF). * * We try to use optimal cache invalidation function: * - dmac_map_area: * - used when the number of pages are _low_. * - it becomes quite slow as the number of pages increase. * - for 648x492 viewfinder (150 pages) it takes 1.3 ms. * - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms. * * - flush_cache_all: * - used when the number of pages are _high_. * - time taken in the range of 500-900 us. * - has a higher penalty but, as whole dcache + icache is invalidated */ /* * FIXME: dmac_inv_range crashes randomly on the user space buffer * address. Fall back to flush_cache_all for now. */ #define ISP_CACHE_FLUSH_PAGES_MAX 0 static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf) { if (buf->skip_cache) return; if (buf->vbuf.m.userptr == 0 || buf->npages == 0 || buf->npages > ISP_CACHE_FLUSH_PAGES_MAX) flush_cache_all(); else { dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length, DMA_FROM_DEVICE); outer_inv_range(buf->vbuf.m.userptr, buf->vbuf.m.userptr + buf->vbuf.length); } } /* * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped * * Lock the VMAs underlying the given buffer into memory. This avoids the * userspace buffer mapping from being swapped out, making VIPT cache handling * easier. * * Note that the pages will not be freed as the buffers have been locked to * memory using by a call to get_user_pages(), but the userspace mapping could * still disappear if the VMAs are not locked. This is caused by the memory * management code trying to be as lock-less as possible, which results in the * userspace mapping manager not finding out that the pages are locked under * some conditions. */ static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock) { struct vm_area_struct *vma; unsigned long start; unsigned long end; int ret = 0; if (buf->vbuf.memory == V4L2_MEMORY_MMAP) return 0; /* We can be called from workqueue context if the current task dies to * unlock the VMAs. 
In that case there's no current memory management * context so unlocking can't be performed, but the VMAs have been or * are getting destroyed anyway so it doesn't really matter. */ if (!current || !current->mm) return lock ? -EINVAL : 0; start = buf->vbuf.m.userptr; end = buf->vbuf.m.userptr + buf->vbuf.length - 1; down_write(&current->mm->mmap_sem); spin_lock(&current->mm->page_table_lock); do { vma = find_vma(current->mm, start); if (vma == NULL) { ret = -EFAULT; goto out; } if (lock) vma->vm_flags |= VM_LOCKED; else vma->vm_flags &= ~VM_LOCKED; start = vma->vm_end + 1; } while (vma->vm_end < end); if (lock) buf->vm_flags |= VM_LOCKED; else buf->vm_flags &= ~VM_LOCKED; out: spin_unlock(&current->mm->page_table_lock); up_write(&current->mm->mmap_sem); return ret; } /* * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer * * Iterate over the vmalloc'ed area and create a scatter list entry for every * page. */ static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf) { struct scatterlist *sglist; unsigned int npages; unsigned int i; void *addr; addr = buf->vaddr; npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT; sglist = vmalloc(npages * sizeof(*sglist)); if (sglist == NULL) return -ENOMEM; sg_init_table(sglist, npages); for (i = 0; i < npages; ++i, addr += PAGE_SIZE) { struct page *page = vmalloc_to_page(addr); if (page == NULL || PageHighMem(page)) { vfree(sglist); return -EINVAL; } sg_set_page(&sglist[i], page, PAGE_SIZE, 0); } buf->sglen = npages; buf->sglist = sglist; return 0; } /* * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer * * Walk the buffer pages list and create a 1:1 mapping to a scatter list. */ static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf) { struct scatterlist *sglist; unsigned int offset = buf->offset; unsigned int i; sglist = vmalloc(buf->npages * sizeof(*sglist)); if (sglist == NULL) return -ENOMEM; sg_init_table(sglist, buf->npages); for (i = 0; i < buf->npages; ++i) { if (PageHighMem(buf->pages[i])) { vfree(sglist); return -EINVAL; } sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset, offset); offset = 0; } buf->sglen = buf->npages; buf->sglist = sglist; return 0; } /* * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer * * Create a scatter list of physically contiguous pages starting at the buffer * memory physical address. */ static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf) { struct scatterlist *sglist; unsigned int offset = buf->offset; unsigned long pfn = buf->paddr >> PAGE_SHIFT; unsigned int i; sglist = vmalloc(buf->npages * sizeof(*sglist)); if (sglist == NULL) return -ENOMEM; sg_init_table(sglist, buf->npages); for (i = 0; i < buf->npages; ++i, ++pfn) { sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset, offset); /* PFNMAP buffers will not get DMA-mapped, set the DMA address * manually. */ sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset; offset = 0; } buf->sglen = buf->npages; buf->sglist = sglist; return 0; } /* * isp_video_buffer_cleanup - Release pages for a userspace VMA. * * Release pages locked by a call isp_video_buffer_prepare_user and free the * pages table. */ static void isp_video_buffer_cleanup(struct isp_video_buffer *buf) { enum dma_data_direction direction; unsigned int i; if (buf->queue->ops->buffer_cleanup) buf->queue->ops->buffer_cleanup(buf); if (!(buf->vm_flags & VM_PFNMAP)) { direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen, direction); } vfree(buf->sglist); buf->sglist = NULL; buf->sglen = 0; if (buf->pages != NULL) { isp_video_buffer_lock_vma(buf, 0); for (i = 0; i < buf->npages; ++i) page_cache_release(buf->pages[i]); vfree(buf->pages); buf->pages = NULL; } buf->npages = 0; buf->skip_cache = false; } /* * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory. * * This function creates a list of pages for a userspace VMA. The number of * pages is first computed based on the buffer size, and pages are then * retrieved by a call to get_user_pages. * * Pages are pinned to memory by get_user_pages, making them available for DMA * transfers. However, due to memory management optimization, it seems the * get_user_pages doesn't guarantee that the pinned pages will not be written * to swap and removed from the userspace mapping(s). When this happens, a page * fault can be generated when accessing those unmapped pages. * * If the fault is triggered by a page table walk caused by VIPT cache * management operations, the page fault handler might oops if the MM semaphore * is held, as it can't handle kernel page faults in that case. To fix that, a * fixup entry needs to be added to the cache management code, or the userspace * VMA must be locked to avoid removing pages from the userspace mapping in the * first place. * * If the number of pages retrieved is smaller than the number required by the * buffer size, the function returns -EFAULT. */ static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf) { unsigned long data; unsigned int first; unsigned int last; int ret; data = buf->vbuf.m.userptr; first = (data & PAGE_MASK) >> PAGE_SHIFT; last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT; buf->offset = data & ~PAGE_MASK; buf->npages = last - first + 1; buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0])); if (buf->pages == NULL) return -ENOMEM; down_read(&current->mm->mmap_sem); ret = get_user_pages(current, current->mm, data & PAGE_MASK, buf->npages, buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, buf->pages, NULL); up_read(&current->mm->mmap_sem); if (ret != buf->npages) { buf->npages = ret < 0 ? 0 : ret; isp_video_buffer_cleanup(buf); return -EFAULT; } ret = isp_video_buffer_lock_vma(buf, 1); if (ret < 0) isp_video_buffer_cleanup(buf); return ret; } /* * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer * * Userspace VM_PFNMAP buffers are supported only if they are contiguous in * memory and if they span a single VMA. * * Return 0 if the buffer is valid, or -EFAULT otherwise. 
*/ static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf) { struct vm_area_struct *vma; unsigned long prev_pfn; unsigned long this_pfn; unsigned long start; unsigned long end; dma_addr_t pa = 0; int ret = -EFAULT; start = buf->vbuf.m.userptr; end = buf->vbuf.m.userptr + buf->vbuf.length - 1; buf->offset = start & ~PAGE_MASK; buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; buf->pages = NULL; down_read(&current->mm->mmap_sem); vma = find_vma(current->mm, start); if (vma == NULL || vma->vm_end < end) goto done; for (prev_pfn = 0; start <= end; start += PAGE_SIZE) { ret = follow_pfn(vma, start, &this_pfn); if (ret) goto done; if (prev_pfn == 0) pa = this_pfn << PAGE_SHIFT; else if (this_pfn != prev_pfn + 1) { ret = -EFAULT; goto done; } prev_pfn = this_pfn; } buf->paddr = pa + buf->offset; ret = 0; done: up_read(&current->mm->mmap_sem); return ret; } /* * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address * * This function locates the VMAs for the buffer's userspace address and checks * that their flags match. The only flag that we need to care for at the moment * is VM_PFNMAP. * * The buffer vm_flags field is set to the first VMA flags. * * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs * have incompatible flags. */ static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf) { struct vm_area_struct *vma; pgprot_t uninitialized_var(vm_page_prot); unsigned long start; unsigned long end; int ret = -EFAULT; start = buf->vbuf.m.userptr; end = buf->vbuf.m.userptr + buf->vbuf.length - 1; down_read(&current->mm->mmap_sem); do { vma = find_vma(current->mm, start); if (vma == NULL) goto done; if (start == buf->vbuf.m.userptr) { buf->vm_flags = vma->vm_flags; vm_page_prot = vma->vm_page_prot; } if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP) goto done; if (vm_page_prot != vma->vm_page_prot) goto done; start = vma->vm_end + 1; } while (vma->vm_end < end); /* Skip cache management to enhance performances for non-cached or * write-combining buffers. */ if (vm_page_prot == pgprot_noncached(vm_page_prot) || vm_page_prot == pgprot_writecombine(vm_page_prot)) buf->skip_cache = true; ret = 0; done: up_read(&current->mm->mmap_sem); return ret; } /* * isp_video_buffer_prepare - Make a buffer ready for operation * * Preparing a buffer involves: * * - validating VMAs (userspace buffers only) * - locking pages and VMAs into memory (userspace buffers only) * - building page and scatter-gather lists * - mapping buffers for DMA operation * - performing driver-specific preparation * * The function must be called in userspace context with a valid mm context * (this excludes cleanup paths such as sys_close when the userspace process * segfaults). */ static int isp_video_buffer_prepare(struct isp_video_buffer *buf) { enum dma_data_direction direction; int ret; switch (buf->vbuf.memory) { case V4L2_MEMORY_MMAP: ret = isp_video_buffer_sglist_kernel(buf); break; case V4L2_MEMORY_USERPTR: ret = isp_video_buffer_prepare_vm_flags(buf); if (ret < 0) return ret; if (buf->vm_flags & VM_PFNMAP) { ret = isp_video_buffer_prepare_pfnmap(buf); if (ret < 0) return ret; ret = isp_video_buffer_sglist_pfnmap(buf); } else { ret = isp_video_buffer_prepare_user(buf); if (ret < 0) return ret; ret = isp_video_buffer_sglist_user(buf); } break; default: return -EINVAL; } if (ret < 0) goto done; if (!(buf->vm_flags & VM_PFNMAP)) { direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen, direction); if (ret != buf->sglen) { ret = -EFAULT; goto done; } } if (buf->queue->ops->buffer_prepare) ret = buf->queue->ops->buffer_prepare(buf); done: if (ret < 0) { isp_video_buffer_cleanup(buf); return ret; } return ret; } /* * isp_video_queue_query - Query the status of a given buffer * * Locking: must be called with the queue lock held. */ static void isp_video_buffer_query(struct isp_video_buffer *buf, struct v4l2_buffer *vbuf) { memcpy(vbuf, &buf->vbuf, sizeof(*vbuf)); if (buf->vma_use_count) vbuf->flags |= V4L2_BUF_FLAG_MAPPED; switch (buf->state) { case ISP_BUF_STATE_ERROR: vbuf->flags |= V4L2_BUF_FLAG_ERROR; /* Fallthrough */ case ISP_BUF_STATE_DONE: vbuf->flags |= V4L2_BUF_FLAG_DONE; break; case ISP_BUF_STATE_QUEUED: case ISP_BUF_STATE_ACTIVE: vbuf->flags |= V4L2_BUF_FLAG_QUEUED; break; case ISP_BUF_STATE_IDLE: default: break; } } /* * isp_video_buffer_wait - Wait for a buffer to be ready * * In non-blocking mode, return immediately with 0 if the buffer is ready or * -EAGAIN if the buffer is in the QUEUED or ACTIVE state. * * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait * queue using the same condition. */ static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking) { if (nonblocking) { return (buf->state != ISP_BUF_STATE_QUEUED && buf->state != ISP_BUF_STATE_ACTIVE) ? 0 : -EAGAIN; } return wait_event_interruptible(buf->wait, buf->state != ISP_BUF_STATE_QUEUED && buf->state != ISP_BUF_STATE_ACTIVE); } /* ----------------------------------------------------------------------------- * Queue management */ /* * isp_video_queue_free - Free video buffers memory * * Buffers can only be freed if the queue isn't streaming and if no buffer is * mapped to userspace. Return -EBUSY if those conditions aren't statisfied. * * This function must be called with the queue lock held. */ static int isp_video_queue_free(struct isp_video_queue *queue) { unsigned int i; if (queue->streaming) return -EBUSY; for (i = 0; i < queue->count; ++i) { if (queue->buffers[i]->vma_use_count != 0) return -EBUSY; } for (i = 0; i < queue->count; ++i) { struct isp_video_buffer *buf = queue->buffers[i]; isp_video_buffer_cleanup(buf); vfree(buf->vaddr); buf->vaddr = NULL; kfree(buf); queue->buffers[i] = NULL; } INIT_LIST_HEAD(&queue->queue); queue->count = 0; return 0; } /* * isp_video_queue_alloc - Allocate video buffers memory * * This function must be called with the queue lock held. */ static int isp_video_queue_alloc(struct isp_video_queue *queue, unsigned int nbuffers, unsigned int size, enum v4l2_memory memory) { struct isp_video_buffer *buf; unsigned int i; void *mem; int ret; /* Start by freeing the buffers. */ ret = isp_video_queue_free(queue); if (ret < 0) return ret; /* Bail out if no buffers should be allocated. */ if (nbuffers == 0) return 0; /* Initialize the allocated buffers. */ for (i = 0; i < nbuffers; ++i) { buf = kzalloc(queue->bufsize, GFP_KERNEL); if (buf == NULL) break; if (memory == V4L2_MEMORY_MMAP) { /* Allocate video buffers memory for mmap mode. Align * the size to the page size. 
*/ mem = vmalloc_32_user(PAGE_ALIGN(size)); if (mem == NULL) { kfree(buf); break; } buf->vbuf.m.offset = i * PAGE_ALIGN(size); buf->vaddr = mem; } buf->vbuf.index = i; buf->vbuf.length = size; buf->vbuf.type = queue->type; buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; buf->vbuf.field = V4L2_FIELD_NONE; buf->vbuf.memory = memory; buf->queue = queue; init_waitqueue_head(&buf->wait); queue->buffers[i] = buf; } if (i == 0) return -ENOMEM; queue->count = i; return nbuffers; } /** * omap3isp_video_queue_cleanup - Clean up the video buffers queue * @queue: Video buffers queue * * Free all allocated resources and clean up the video buffers queue. The queue * must not be busy (no ongoing video stream) and buffers must have been * unmapped. * * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been * unmapped. */ int omap3isp_video_queue_cleanup(struct isp_video_queue *queue) { return isp_video_queue_free(queue); } /** * omap3isp_video_queue_init - Initialize the video buffers queue * @queue: Video buffers queue * @type: V4L2 buffer type (capture or output) * @ops: Driver-specific queue operations * @dev: Device used for DMA operations * @bufsize: Size of the driver-specific buffer structure * * Initialize the video buffers queue with the supplied parameters. * * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet. * * Buffer objects will be allocated using the given buffer size to allow room * for driver-specific fields. Driver-specific buffer structures must start * with a struct isp_video_buffer field. Drivers with no driver-specific buffer * structure must pass the size of the isp_video_buffer structure in the bufsize * parameter. * * Return 0 on success. */ int omap3isp_video_queue_init(struct isp_video_queue *queue, enum v4l2_buf_type type, const struct isp_video_queue_operations *ops, struct device *dev, unsigned int bufsize) { INIT_LIST_HEAD(&queue->queue); mutex_init(&queue->lock); spin_lock_init(&queue->irqlock); queue->type = type; queue->ops = ops; queue->dev = dev; queue->bufsize = bufsize; return 0; } /* ----------------------------------------------------------------------------- * V4L2 operations */ /** * omap3isp_video_queue_reqbufs - Allocate video buffers memory * * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It * allocated video buffer objects and, for MMAP buffers, buffer memory. * * If the number of buffers is 0, all buffers are freed and the function returns * without performing any allocation. * * If the number of buffers is not 0, currently allocated buffers (if any) are * freed and the requested number of buffers are allocated. Depending on * driver-specific requirements and on memory availability, a number of buffer * smaller or bigger than requested can be allocated. This isn't considered as * an error. 
* * Return 0 on success or one of the following error codes: * * -EINVAL if the buffer type or index are invalid * -EBUSY if the queue is busy (streaming or buffers mapped) * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition */ int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue, struct v4l2_requestbuffers *rb) { unsigned int nbuffers = rb->count; unsigned int size; int ret; if (rb->type != queue->type) return -EINVAL; queue->ops->queue_prepare(queue, &nbuffers, &size); if (size == 0) return -EINVAL; nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS); mutex_lock(&queue->lock); ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory); if (ret < 0) goto done; rb->count = ret; ret = 0; done: mutex_unlock(&queue->lock); return ret; } /** * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue * * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It * returns the status of a given video buffer. * * Return 0 on success or -EINVAL if the buffer type or index are invalid. */ int omap3isp_video_queue_querybuf(struct isp_video_queue *queue, struct v4l2_buffer *vbuf) { struct isp_video_buffer *buf; int ret = 0; if (vbuf->type != queue->type) return -EINVAL; mutex_lock(&queue->lock); if (vbuf->index >= queue->count) { ret = -EINVAL; goto done; } buf = queue->buffers[vbuf->index]; isp_video_buffer_query(buf, vbuf); done: mutex_unlock(&queue->lock); return ret; } /** * omap3isp_video_queue_qbuf - Queue a buffer * * This function is intended to be used as a VIDIOC_QBUF ioctl handler. * * The v4l2_buffer structure passed from userspace is first sanity tested. If * sane, the buffer is then processed and added to the main queue and, if the * queue is streaming, to the IRQ queue. * * Before being enqueued, USERPTR buffers are checked for address changes. If * the buffer has a different userspace address, the old memory area is unlocked * and the new memory area is locked. */ int omap3isp_video_queue_qbuf(struct isp_video_queue *queue, struct v4l2_buffer *vbuf) { struct isp_video_buffer *buf; unsigned long flags; int ret = -EINVAL; if (vbuf->type != queue->type) goto done; mutex_lock(&queue->lock); if (vbuf->index >= queue->count) goto done; buf = queue->buffers[vbuf->index]; if (vbuf->memory != buf->vbuf.memory) goto done; if (buf->state != ISP_BUF_STATE_IDLE) goto done; if (vbuf->memory == V4L2_MEMORY_USERPTR && vbuf->length < buf->vbuf.length) goto done; if (vbuf->memory == V4L2_MEMORY_USERPTR && vbuf->m.userptr != buf->vbuf.m.userptr) { isp_video_buffer_cleanup(buf); buf->vbuf.m.userptr = vbuf->m.userptr; buf->prepared = 0; } if (!buf->prepared) { ret = isp_video_buffer_prepare(buf); if (ret < 0) goto done; buf->prepared = 1; } isp_video_buffer_cache_sync(buf); buf->state = ISP_BUF_STATE_QUEUED; list_add_tail(&buf->stream, &queue->queue); if (queue->streaming) { spin_lock_irqsave(&queue->irqlock, flags); queue->ops->buffer_queue(buf); spin_unlock_irqrestore(&queue->irqlock, flags); } ret = 0; done: mutex_unlock(&queue->lock); return ret; } /** * omap3isp_video_queue_dqbuf - Dequeue a buffer * * This function is intended to be used as a VIDIOC_DQBUF ioctl handler. * * Wait until a buffer is ready to be dequeued, remove it from the queue and * copy its information to the v4l2_buffer structure. * * If the nonblocking argument is not zero and no buffer is ready, return * -EAGAIN immediately instead of waiting. 
* * If no buffer has been enqueued, or if the requested buffer type doesn't match * the queue type, return -EINVAL. */ int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue, struct v4l2_buffer *vbuf, int nonblocking) { struct isp_video_buffer *buf; int ret; if (vbuf->type != queue->type) return -EINVAL; mutex_lock(&queue->lock); if (list_empty(&queue->queue)) { ret = -EINVAL; goto done; } buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream); ret = isp_video_buffer_wait(buf, nonblocking); if (ret < 0) goto done; list_del(&buf->stream); isp_video_buffer_query(buf, vbuf); buf->state = ISP_BUF_STATE_IDLE; vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED; done: mutex_unlock(&queue->lock); return ret; } /** * omap3isp_video_queue_streamon - Start streaming * * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It * starts streaming on the queue and calls the buffer_queue operation for all * queued buffers. * * Return 0 on success. */ int omap3isp_video_queue_streamon(struct isp_video_queue *queue) { struct isp_video_buffer *buf; unsigned long flags; mutex_lock(&queue->lock); if (queue->streaming) goto done; queue->streaming = 1; spin_lock_irqsave(&queue->irqlock, flags); list_for_each_entry(buf, &queue->queue, stream) queue->ops->buffer_queue(buf); spin_unlock_irqrestore(&queue->irqlock, flags); done: mutex_unlock(&queue->lock); return 0; } /** * omap3isp_video_queue_streamoff - Stop streaming * * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It * stops streaming on the queue and wakes up all the buffers. * * Drivers must stop the hardware and synchronize with interrupt handlers and/or * delayed works before calling this function to make sure no buffer will be * touched by the driver and/or hardware. */ void omap3isp_video_queue_streamoff(struct isp_video_queue *queue) { struct isp_video_buffer *buf; unsigned long flags; unsigned int i; mutex_lock(&queue->lock); if (!queue->streaming) goto done; queue->streaming = 0; spin_lock_irqsave(&queue->irqlock, flags); for (i = 0; i < queue->count; ++i) { buf = queue->buffers[i]; if (buf->state == ISP_BUF_STATE_ACTIVE) wake_up(&buf->wait); buf->state = ISP_BUF_STATE_IDLE; } spin_unlock_irqrestore(&queue->irqlock, flags); INIT_LIST_HEAD(&queue->queue); done: mutex_unlock(&queue->lock); } /** * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE * * This function is intended to be used with suspend/resume operations. It * discards all 'done' buffers as they would be too old to be requested after * resume. * * Drivers must stop the hardware and synchronize with interrupt handlers and/or * delayed works before calling this function to make sure no buffer will be * touched by the driver and/or hardware. 
*/ void omap3isp_video_queue_discard_done(struct isp_video_queue *queue) { struct isp_video_buffer *buf; unsigned int i; mutex_lock(&queue->lock); if (!queue->streaming) goto done; for (i = 0; i < queue->count; ++i) { buf = queue->buffers[i]; if (buf->state == ISP_BUF_STATE_DONE) buf->state = ISP_BUF_STATE_ERROR; } done: mutex_unlock(&queue->lock); } static void isp_video_queue_vm_open(struct vm_area_struct *vma) { struct isp_video_buffer *buf = vma->vm_private_data; buf->vma_use_count++; } static void isp_video_queue_vm_close(struct vm_area_struct *vma) { struct isp_video_buffer *buf = vma->vm_private_data; buf->vma_use_count--; } static const struct vm_operations_struct isp_video_queue_vm_ops = { .open = isp_video_queue_vm_open, .close = isp_video_queue_vm_close, }; /** * omap3isp_video_queue_mmap - Map buffers to userspace * * This function is intended to be used as an mmap() file operation handler. It * maps a buffer to userspace based on the VMA offset. * * Only buffers of memory type MMAP are supported. */ int omap3isp_video_queue_mmap(struct isp_video_queue *queue, struct vm_area_struct *vma) { struct isp_video_buffer *uninitialized_var(buf); unsigned long size; unsigned int i; int ret = 0; mutex_lock(&queue->lock); for (i = 0; i < queue->count; ++i) { buf = queue->buffers[i]; if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff) break; } if (i == queue->count) { ret = -EINVAL; goto done; } size = vma->vm_end - vma->vm_start; if (buf->vbuf.memory != V4L2_MEMORY_MMAP || size != PAGE_ALIGN(buf->vbuf.length)) { ret = -EINVAL; goto done; } ret = remap_vmalloc_range(vma, buf->vaddr, 0); if (ret < 0) goto done; vma->vm_ops = &isp_video_queue_vm_ops; vma->vm_private_data = buf; isp_video_queue_vm_open(vma); done: mutex_unlock(&queue->lock); return ret; } /** * omap3isp_video_queue_poll - Poll video queue state * * This function is intended to be used as a poll() file operation handler. It * polls the state of the video buffer at the front of the queue and returns an * events mask. * * If no buffer is present at the front of the queue, POLLERR is returned. */ unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue, struct file *file, poll_table *wait) { struct isp_video_buffer *buf; unsigned int mask = 0; mutex_lock(&queue->lock); if (list_empty(&queue->queue)) { mask |= POLLERR; goto done; } buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream); poll_wait(file, &buf->wait, wait); if (buf->state == ISP_BUF_STATE_DONE || buf->state == ISP_BUF_STATE_ERROR) { if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) mask |= POLLIN | POLLRDNORM; else mask |= POLLOUT | POLLWRNORM; } done: mutex_unlock(&queue->lock); return mask; }
license: gpl-2.0
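For USERPTR buffers, isp_video_buffer_prepare_user() above derives the first and last page indices, the byte offset into the first page, and the number of pages to pin with get_user_pages(); the offset is then consumed by the first scatterlist entry in isp_video_buffer_sglist_user(). The following standalone sketch reproduces that page arithmetic, assuming a 4 KiB page size purely for illustration.

/* Sketch of the USERPTR page-count arithmetic used by
 * isp_video_buffer_prepare_user(). Standalone example, not driver code;
 * the 4 KiB page size is an assumption for the demonstration. */
#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long userptr = 0x20000123UL;      /* example buffer start   */
    unsigned long length  = 640UL * 480 * 2;   /* example buffer size    */

    unsigned long first  = (userptr & PAGE_MASK) >> PAGE_SHIFT;
    unsigned long last   = ((userptr + length - 1) & PAGE_MASK) >> PAGE_SHIFT;
    unsigned long offset = userptr & ~PAGE_MASK;
    unsigned long npages = last - first + 1;

    printf("offset into first page: %lu\n", offset);
    printf("pages to pin with get_user_pages(): %lu\n", npages);
    return 0;
}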
repo_name: CyanogenMod/android_kernel_huawei_msm8226
path: drivers/usb/dwc3/debugfs.c
copies: 502
size: 29137
content:
/** * debugfs.c - DesignWare USB3 DRD Controller DebugFS file * * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com * * Authors: Felipe Balbi <balbi@ti.com>, * Sebastian Andrzej Siewior <bigeasy@linutronix.de> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the above-listed copyright holders may not be used * to endorse or promote products derived from this software without * specific prior written permission. * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2, as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/ptrace.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/uaccess.h> #include <linux/usb/ch9.h> #include "core.h" #include "gadget.h" #include "io.h" #include "debug.h" #define dump_register(nm) \ { \ .name = __stringify(nm), \ .offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \ } static const struct debugfs_reg32 dwc3_regs[] = { dump_register(GSBUSCFG0), dump_register(GSBUSCFG1), dump_register(GTXTHRCFG), dump_register(GRXTHRCFG), dump_register(GCTL), dump_register(GEVTEN), dump_register(GSTS), dump_register(GSNPSID), dump_register(GGPIO), dump_register(GUID), dump_register(GUCTL), dump_register(GBUSERRADDR0), dump_register(GBUSERRADDR1), dump_register(GPRTBIMAP0), dump_register(GPRTBIMAP1), dump_register(GHWPARAMS0), dump_register(GHWPARAMS1), dump_register(GHWPARAMS2), dump_register(GHWPARAMS3), dump_register(GHWPARAMS4), dump_register(GHWPARAMS5), dump_register(GHWPARAMS6), dump_register(GHWPARAMS7), dump_register(GDBGFIFOSPACE), dump_register(GDBGLTSSM), dump_register(GPRTBIMAP_HS0), dump_register(GPRTBIMAP_HS1), dump_register(GPRTBIMAP_FS0), dump_register(GPRTBIMAP_FS1), dump_register(GUSB2PHYCFG(0)), dump_register(GUSB2PHYCFG(1)), dump_register(GUSB2PHYCFG(2)), dump_register(GUSB2PHYCFG(3)), dump_register(GUSB2PHYCFG(4)), dump_register(GUSB2PHYCFG(5)), dump_register(GUSB2PHYCFG(6)), dump_register(GUSB2PHYCFG(7)), dump_register(GUSB2PHYCFG(8)), dump_register(GUSB2PHYCFG(9)), dump_register(GUSB2PHYCFG(10)), dump_register(GUSB2PHYCFG(11)), dump_register(GUSB2PHYCFG(12)), dump_register(GUSB2PHYCFG(13)), dump_register(GUSB2PHYCFG(14)), dump_register(GUSB2PHYCFG(15)), dump_register(GUSB2I2CCTL(0)), dump_register(GUSB2I2CCTL(1)), dump_register(GUSB2I2CCTL(2)), dump_register(GUSB2I2CCTL(3)), dump_register(GUSB2I2CCTL(4)), dump_register(GUSB2I2CCTL(5)), dump_register(GUSB2I2CCTL(6)), dump_register(GUSB2I2CCTL(7)), dump_register(GUSB2I2CCTL(8)), dump_register(GUSB2I2CCTL(9)), dump_register(GUSB2I2CCTL(10)), dump_register(GUSB2I2CCTL(11)), dump_register(GUSB2I2CCTL(12)), dump_register(GUSB2I2CCTL(13)), dump_register(GUSB2I2CCTL(14)), dump_register(GUSB2I2CCTL(15)), dump_register(GUSB2PHYACC(0)), dump_register(GUSB2PHYACC(1)), dump_register(GUSB2PHYACC(2)), dump_register(GUSB2PHYACC(3)), dump_register(GUSB2PHYACC(4)), dump_register(GUSB2PHYACC(5)), dump_register(GUSB2PHYACC(6)), dump_register(GUSB2PHYACC(7)), dump_register(GUSB2PHYACC(8)), dump_register(GUSB2PHYACC(9)), dump_register(GUSB2PHYACC(10)), dump_register(GUSB2PHYACC(11)), dump_register(GUSB2PHYACC(12)), dump_register(GUSB2PHYACC(13)), dump_register(GUSB2PHYACC(14)), dump_register(GUSB2PHYACC(15)), dump_register(GUSB3PIPECTL(0)), dump_register(GUSB3PIPECTL(1)), dump_register(GUSB3PIPECTL(2)), dump_register(GUSB3PIPECTL(3)), dump_register(GUSB3PIPECTL(4)), dump_register(GUSB3PIPECTL(5)), dump_register(GUSB3PIPECTL(6)), dump_register(GUSB3PIPECTL(7)), dump_register(GUSB3PIPECTL(8)), dump_register(GUSB3PIPECTL(9)), dump_register(GUSB3PIPECTL(10)), dump_register(GUSB3PIPECTL(11)), dump_register(GUSB3PIPECTL(12)), dump_register(GUSB3PIPECTL(13)), dump_register(GUSB3PIPECTL(14)), dump_register(GUSB3PIPECTL(15)), dump_register(GTXFIFOSIZ(0)), dump_register(GTXFIFOSIZ(1)), dump_register(GTXFIFOSIZ(2)), dump_register(GTXFIFOSIZ(3)), dump_register(GTXFIFOSIZ(4)), dump_register(GTXFIFOSIZ(5)), dump_register(GTXFIFOSIZ(6)), dump_register(GTXFIFOSIZ(7)), 
dump_register(GTXFIFOSIZ(8)), dump_register(GTXFIFOSIZ(9)), dump_register(GTXFIFOSIZ(10)), dump_register(GTXFIFOSIZ(11)), dump_register(GTXFIFOSIZ(12)), dump_register(GTXFIFOSIZ(13)), dump_register(GTXFIFOSIZ(14)), dump_register(GTXFIFOSIZ(15)), dump_register(GTXFIFOSIZ(16)), dump_register(GTXFIFOSIZ(17)), dump_register(GTXFIFOSIZ(18)), dump_register(GTXFIFOSIZ(19)), dump_register(GTXFIFOSIZ(20)), dump_register(GTXFIFOSIZ(21)), dump_register(GTXFIFOSIZ(22)), dump_register(GTXFIFOSIZ(23)), dump_register(GTXFIFOSIZ(24)), dump_register(GTXFIFOSIZ(25)), dump_register(GTXFIFOSIZ(26)), dump_register(GTXFIFOSIZ(27)), dump_register(GTXFIFOSIZ(28)), dump_register(GTXFIFOSIZ(29)), dump_register(GTXFIFOSIZ(30)), dump_register(GTXFIFOSIZ(31)), dump_register(GRXFIFOSIZ(0)), dump_register(GRXFIFOSIZ(1)), dump_register(GRXFIFOSIZ(2)), dump_register(GRXFIFOSIZ(3)), dump_register(GRXFIFOSIZ(4)), dump_register(GRXFIFOSIZ(5)), dump_register(GRXFIFOSIZ(6)), dump_register(GRXFIFOSIZ(7)), dump_register(GRXFIFOSIZ(8)), dump_register(GRXFIFOSIZ(9)), dump_register(GRXFIFOSIZ(10)), dump_register(GRXFIFOSIZ(11)), dump_register(GRXFIFOSIZ(12)), dump_register(GRXFIFOSIZ(13)), dump_register(GRXFIFOSIZ(14)), dump_register(GRXFIFOSIZ(15)), dump_register(GRXFIFOSIZ(16)), dump_register(GRXFIFOSIZ(17)), dump_register(GRXFIFOSIZ(18)), dump_register(GRXFIFOSIZ(19)), dump_register(GRXFIFOSIZ(20)), dump_register(GRXFIFOSIZ(21)), dump_register(GRXFIFOSIZ(22)), dump_register(GRXFIFOSIZ(23)), dump_register(GRXFIFOSIZ(24)), dump_register(GRXFIFOSIZ(25)), dump_register(GRXFIFOSIZ(26)), dump_register(GRXFIFOSIZ(27)), dump_register(GRXFIFOSIZ(28)), dump_register(GRXFIFOSIZ(29)), dump_register(GRXFIFOSIZ(30)), dump_register(GRXFIFOSIZ(31)), dump_register(GEVNTADRLO(0)), dump_register(GEVNTADRHI(0)), dump_register(GEVNTSIZ(0)), dump_register(GEVNTCOUNT(0)), dump_register(GHWPARAMS8), dump_register(GFLADJ), dump_register(DCFG), dump_register(DCTL), dump_register(DEVTEN), dump_register(DSTS), dump_register(DGCMDPAR), dump_register(DGCMD), dump_register(DALEPENA), dump_register(DEPCMDPAR2(0)), dump_register(DEPCMDPAR2(1)), dump_register(DEPCMDPAR2(2)), dump_register(DEPCMDPAR2(3)), dump_register(DEPCMDPAR2(4)), dump_register(DEPCMDPAR2(5)), dump_register(DEPCMDPAR2(6)), dump_register(DEPCMDPAR2(7)), dump_register(DEPCMDPAR2(8)), dump_register(DEPCMDPAR2(9)), dump_register(DEPCMDPAR2(10)), dump_register(DEPCMDPAR2(11)), dump_register(DEPCMDPAR2(12)), dump_register(DEPCMDPAR2(13)), dump_register(DEPCMDPAR2(14)), dump_register(DEPCMDPAR2(15)), dump_register(DEPCMDPAR2(16)), dump_register(DEPCMDPAR2(17)), dump_register(DEPCMDPAR2(18)), dump_register(DEPCMDPAR2(19)), dump_register(DEPCMDPAR2(20)), dump_register(DEPCMDPAR2(21)), dump_register(DEPCMDPAR2(22)), dump_register(DEPCMDPAR2(23)), dump_register(DEPCMDPAR2(24)), dump_register(DEPCMDPAR2(25)), dump_register(DEPCMDPAR2(26)), dump_register(DEPCMDPAR2(27)), dump_register(DEPCMDPAR2(28)), dump_register(DEPCMDPAR2(29)), dump_register(DEPCMDPAR2(30)), dump_register(DEPCMDPAR2(31)), dump_register(DEPCMDPAR1(0)), dump_register(DEPCMDPAR1(1)), dump_register(DEPCMDPAR1(2)), dump_register(DEPCMDPAR1(3)), dump_register(DEPCMDPAR1(4)), dump_register(DEPCMDPAR1(5)), dump_register(DEPCMDPAR1(6)), dump_register(DEPCMDPAR1(7)), dump_register(DEPCMDPAR1(8)), dump_register(DEPCMDPAR1(9)), dump_register(DEPCMDPAR1(10)), dump_register(DEPCMDPAR1(11)), dump_register(DEPCMDPAR1(12)), dump_register(DEPCMDPAR1(13)), dump_register(DEPCMDPAR1(14)), dump_register(DEPCMDPAR1(15)), dump_register(DEPCMDPAR1(16)), 
dump_register(DEPCMDPAR1(17)), dump_register(DEPCMDPAR1(18)), dump_register(DEPCMDPAR1(19)), dump_register(DEPCMDPAR1(20)), dump_register(DEPCMDPAR1(21)), dump_register(DEPCMDPAR1(22)), dump_register(DEPCMDPAR1(23)), dump_register(DEPCMDPAR1(24)), dump_register(DEPCMDPAR1(25)), dump_register(DEPCMDPAR1(26)), dump_register(DEPCMDPAR1(27)), dump_register(DEPCMDPAR1(28)), dump_register(DEPCMDPAR1(29)), dump_register(DEPCMDPAR1(30)), dump_register(DEPCMDPAR1(31)), dump_register(DEPCMDPAR0(0)), dump_register(DEPCMDPAR0(1)), dump_register(DEPCMDPAR0(2)), dump_register(DEPCMDPAR0(3)), dump_register(DEPCMDPAR0(4)), dump_register(DEPCMDPAR0(5)), dump_register(DEPCMDPAR0(6)), dump_register(DEPCMDPAR0(7)), dump_register(DEPCMDPAR0(8)), dump_register(DEPCMDPAR0(9)), dump_register(DEPCMDPAR0(10)), dump_register(DEPCMDPAR0(11)), dump_register(DEPCMDPAR0(12)), dump_register(DEPCMDPAR0(13)), dump_register(DEPCMDPAR0(14)), dump_register(DEPCMDPAR0(15)), dump_register(DEPCMDPAR0(16)), dump_register(DEPCMDPAR0(17)), dump_register(DEPCMDPAR0(18)), dump_register(DEPCMDPAR0(19)), dump_register(DEPCMDPAR0(20)), dump_register(DEPCMDPAR0(21)), dump_register(DEPCMDPAR0(22)), dump_register(DEPCMDPAR0(23)), dump_register(DEPCMDPAR0(24)), dump_register(DEPCMDPAR0(25)), dump_register(DEPCMDPAR0(26)), dump_register(DEPCMDPAR0(27)), dump_register(DEPCMDPAR0(28)), dump_register(DEPCMDPAR0(29)), dump_register(DEPCMDPAR0(30)), dump_register(DEPCMDPAR0(31)), dump_register(DEPCMD(0)), dump_register(DEPCMD(1)), dump_register(DEPCMD(2)), dump_register(DEPCMD(3)), dump_register(DEPCMD(4)), dump_register(DEPCMD(5)), dump_register(DEPCMD(6)), dump_register(DEPCMD(7)), dump_register(DEPCMD(8)), dump_register(DEPCMD(9)), dump_register(DEPCMD(10)), dump_register(DEPCMD(11)), dump_register(DEPCMD(12)), dump_register(DEPCMD(13)), dump_register(DEPCMD(14)), dump_register(DEPCMD(15)), dump_register(DEPCMD(16)), dump_register(DEPCMD(17)), dump_register(DEPCMD(18)), dump_register(DEPCMD(19)), dump_register(DEPCMD(20)), dump_register(DEPCMD(21)), dump_register(DEPCMD(22)), dump_register(DEPCMD(23)), dump_register(DEPCMD(24)), dump_register(DEPCMD(25)), dump_register(DEPCMD(26)), dump_register(DEPCMD(27)), dump_register(DEPCMD(28)), dump_register(DEPCMD(29)), dump_register(DEPCMD(30)), dump_register(DEPCMD(31)), dump_register(OCFG), dump_register(OCTL), dump_register(OEVTEN), dump_register(OSTS), }; static int dwc3_regdump_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; seq_printf(s, "DesignWare USB3 Core Register Dump\n"); debugfs_print_regs32(s, dwc3_regs, ARRAY_SIZE(dwc3_regs), dwc->regs, ""); return 0; } static int dwc3_regdump_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_regdump_show, inode->i_private); } static const struct file_operations dwc3_regdump_fops = { .open = dwc3_regdump_open, .read = seq_read, .release = single_release, }; static int dwc3_mode_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GCTL); spin_unlock_irqrestore(&dwc->lock, flags); switch (DWC3_GCTL_PRTCAP(reg)) { case DWC3_GCTL_PRTCAP_HOST: seq_printf(s, "host\n"); break; case DWC3_GCTL_PRTCAP_DEVICE: seq_printf(s, "device\n"); break; case DWC3_GCTL_PRTCAP_OTG: seq_printf(s, "OTG\n"); break; default: seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg)); } return 0; } static int dwc3_mode_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_mode_show, 
inode->i_private); } static ssize_t dwc3_mode_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; unsigned long flags; u32 mode = 0; char buf[32]; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; if (!strncmp(buf, "host", 4)) mode |= DWC3_GCTL_PRTCAP_HOST; if (!strncmp(buf, "device", 6)) mode |= DWC3_GCTL_PRTCAP_DEVICE; if (!strncmp(buf, "otg", 3)) mode |= DWC3_GCTL_PRTCAP_OTG; if (mode) { spin_lock_irqsave(&dwc->lock, flags); dwc3_set_mode(dwc, mode); spin_unlock_irqrestore(&dwc->lock, flags); } return count; } static const struct file_operations dwc3_mode_fops = { .open = dwc3_mode_open, .write = dwc3_mode_write, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dwc3_testmode_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= DWC3_DCTL_TSTCTRL_MASK; reg >>= 1; spin_unlock_irqrestore(&dwc->lock, flags); switch (reg) { case 0: seq_printf(s, "no test\n"); break; case TEST_J: seq_printf(s, "test_j\n"); break; case TEST_K: seq_printf(s, "test_k\n"); break; case TEST_SE0_NAK: seq_printf(s, "test_se0_nak\n"); break; case TEST_PACKET: seq_printf(s, "test_packet\n"); break; case TEST_FORCE_EN: seq_printf(s, "test_force_enable\n"); break; default: seq_printf(s, "UNKNOWN %d\n", reg); } return 0; } static int dwc3_testmode_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_testmode_show, inode->i_private); } static ssize_t dwc3_testmode_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; unsigned long flags; u32 testmode = 0; char buf[32]; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; if (!strncmp(buf, "test_j", 6)) testmode = TEST_J; else if (!strncmp(buf, "test_k", 6)) testmode = TEST_K; else if (!strncmp(buf, "test_se0_nak", 12)) testmode = TEST_SE0_NAK; else if (!strncmp(buf, "test_packet", 11)) testmode = TEST_PACKET; else if (!strncmp(buf, "test_force_enable", 17)) testmode = TEST_FORCE_EN; else testmode = 0; spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_set_test_mode(dwc, testmode); spin_unlock_irqrestore(&dwc->lock, flags); return count; } static const struct file_operations dwc3_testmode_fops = { .open = dwc3_testmode_open, .write = dwc3_testmode_write, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dwc3_link_state_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; unsigned long flags; enum dwc3_link_state state; u32 reg; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DSTS); state = DWC3_DSTS_USBLNKST(reg); spin_unlock_irqrestore(&dwc->lock, flags); switch (state) { case DWC3_LINK_STATE_U0: seq_printf(s, "U0\n"); break; case DWC3_LINK_STATE_U1: seq_printf(s, "U1\n"); break; case DWC3_LINK_STATE_U2: seq_printf(s, "U2\n"); break; case DWC3_LINK_STATE_U3: seq_printf(s, "U3\n"); break; case DWC3_LINK_STATE_SS_DIS: seq_printf(s, "SS.Disabled\n"); break; case DWC3_LINK_STATE_RX_DET: seq_printf(s, "Rx.Detect\n"); break; case DWC3_LINK_STATE_SS_INACT: seq_printf(s, "SS.Inactive\n"); break; case DWC3_LINK_STATE_POLL: seq_printf(s, "Poll\n"); break; case DWC3_LINK_STATE_RECOV: seq_printf(s, "Recovery\n"); break; case DWC3_LINK_STATE_HRESET: seq_printf(s, 
"HRESET\n"); break; case DWC3_LINK_STATE_CMPLY: seq_printf(s, "Compliance\n"); break; case DWC3_LINK_STATE_LPBK: seq_printf(s, "Loopback\n"); break; default: seq_printf(s, "UNKNOWN %d\n", reg); } return 0; } static int dwc3_link_state_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_link_state_show, inode->i_private); } static ssize_t dwc3_link_state_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; unsigned long flags; enum dwc3_link_state state = 0; char buf[32]; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; if (!strncmp(buf, "SS.Disabled", 11)) state = DWC3_LINK_STATE_SS_DIS; else if (!strncmp(buf, "Rx.Detect", 9)) state = DWC3_LINK_STATE_RX_DET; else if (!strncmp(buf, "SS.Inactive", 11)) state = DWC3_LINK_STATE_SS_INACT; else if (!strncmp(buf, "Recovery", 8)) state = DWC3_LINK_STATE_RECOV; else if (!strncmp(buf, "Compliance", 10)) state = DWC3_LINK_STATE_CMPLY; else if (!strncmp(buf, "Loopback", 8)) state = DWC3_LINK_STATE_LPBK; else return -EINVAL; spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_set_link_state(dwc, state); spin_unlock_irqrestore(&dwc->lock, flags); return count; } static const struct file_operations dwc3_link_state_fops = { .open = dwc3_link_state_open, .write = dwc3_link_state_write, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int ep_num; static ssize_t dwc3_store_ep_num(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct seq_file *s = file->private_data; struct dwc3 *dwc = s->private; char kbuf[10]; unsigned int num, dir; unsigned long flags; memset(kbuf, 0, 10); if (copy_from_user(kbuf, ubuf, count > 10 ? 
10 : count)) return -EFAULT; if (sscanf(kbuf, "%u %u", &num, &dir) != 2) return -EINVAL; spin_lock_irqsave(&dwc->lock, flags); ep_num = (num << 1) + dir; spin_unlock_irqrestore(&dwc->lock, flags); return count; } static int dwc3_ep_req_list_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; struct dwc3_ep *dep; struct dwc3_request *req = NULL; struct list_head *ptr = NULL; unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); dep = dwc->eps[ep_num]; seq_printf(s, "%s request list: flags: 0x%x\n", dep->name, dep->flags); list_for_each(ptr, &dep->request_list) { req = list_entry(ptr, struct dwc3_request, list); seq_printf(s, "req:0x%p len: %d sts: %d dma:0x%pa num_sgs: %d\n", req, req->request.length, req->request.status, &req->request.dma, req->request.num_sgs); } spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static int dwc3_ep_req_list_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_ep_req_list_show, inode->i_private); } static const struct file_operations dwc3_ep_req_list_fops = { .open = dwc3_ep_req_list_open, .write = dwc3_store_ep_num, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dwc3_ep_queued_req_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; struct dwc3_ep *dep; struct dwc3_request *req = NULL; struct list_head *ptr = NULL; unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); dep = dwc->eps[ep_num]; seq_printf(s, "%s queued reqs to HW: flags:0x%x\n", dep->name, dep->flags); list_for_each(ptr, &dep->req_queued) { req = list_entry(ptr, struct dwc3_request, list); seq_printf(s, "req:0x%p len:%d sts:%d dma:%pa nsg:%d trb:0x%p\n", req, req->request.length, req->request.status, &req->request.dma, req->request.num_sgs, req->trb); } spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static int dwc3_ep_queued_req_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_ep_queued_req_show, inode->i_private); } const struct file_operations dwc3_ep_req_queued_fops = { .open = dwc3_ep_queued_req_open, .write = dwc3_store_ep_num, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int dwc3_ep_trbs_show(struct seq_file *s, void *unused) { struct dwc3 *dwc = s->private; struct dwc3_ep *dep; struct dwc3_trb *trb; unsigned long flags; int j; if (!ep_num) return 0; spin_lock_irqsave(&dwc->lock, flags); dep = dwc->eps[ep_num]; seq_printf(s, "%s trb pool: flags:0x%x freeslot:%d busyslot:%d\n", dep->name, dep->flags, dep->free_slot, dep->busy_slot); for (j = 0; j < DWC3_TRB_NUM; j++) { trb = &dep->trb_pool[j]; seq_printf(s, "trb:0x%p bph:0x%x bpl:0x%x size:0x%x ctrl: %x\n", trb, trb->bph, trb->bpl, trb->size, trb->ctrl); } spin_unlock_irqrestore(&dwc->lock, flags); return 0; } static int dwc3_ep_trbs_list_open(struct inode *inode, struct file *file) { return single_open(file, dwc3_ep_trbs_show, inode->i_private); } const struct file_operations dwc3_ep_trb_list_fops = { .open = dwc3_ep_trbs_list_open, .write = dwc3_store_ep_num, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static unsigned int ep_addr_rxdbg_mask = 1; module_param(ep_addr_rxdbg_mask, uint, S_IRUGO | S_IWUSR); static unsigned int ep_addr_txdbg_mask = 1; module_param(ep_addr_txdbg_mask, uint, S_IRUGO | S_IWUSR); /* Maximum debug message length */ #define DBG_DATA_MSG 64UL /* Maximum number of messages */ #define DBG_DATA_MAX 128UL static struct { char (buf[DBG_DATA_MAX])[DBG_DATA_MSG]; /* buffer */ unsigned idx; /* index */ unsigned tty; /* 
print to console? */ rwlock_t lck; /* lock */ } dbg_dwc3_data = { .idx = 0, .tty = 0, .lck = __RW_LOCK_UNLOCKED(lck) }; /** * dbg_dec: decrements debug event index * @idx: buffer index */ static inline void __maybe_unused dbg_dec(unsigned *idx) { *idx = (*idx - 1) % DBG_DATA_MAX; } /** * dbg_inc: increments debug event index * @idx: buffer index */ static inline void dbg_inc(unsigned *idx) { *idx = (*idx + 1) % DBG_DATA_MAX; } #define TIME_BUF_LEN 20 /*get_timestamp - returns time of day in us */ static char *get_timestamp(char *tbuf) { unsigned long long t; unsigned long nanosec_rem; t = cpu_clock(smp_processor_id()); nanosec_rem = do_div(t, 1000000000)/1000; scnprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu] ", (unsigned long)t, nanosec_rem); return tbuf; } static int allow_dbg_print(u8 ep_num) { int dir, num; /* allow bus wide events */ if (ep_num == 0xff) return 1; dir = ep_num & 0x1; num = ep_num >> 1; num = 1 << num; if (dir && (num & ep_addr_txdbg_mask)) return 1; if (!dir && (num & ep_addr_rxdbg_mask)) return 1; return 0; } /** * dbg_print: prints the common part of the event * @addr: endpoint address * @name: event name * @status: status * @extra: extra information */ void dbg_print(u8 ep_num, const char *name, int status, const char *extra) { unsigned long flags; char tbuf[TIME_BUF_LEN]; if (!allow_dbg_print(ep_num)) return; write_lock_irqsave(&dbg_dwc3_data.lck, flags); scnprintf(dbg_dwc3_data.buf[dbg_dwc3_data.idx], DBG_DATA_MSG, "%s\t? %02X %-7.7s %4i ?\t%s\n", get_timestamp(tbuf), ep_num, name, status, extra); dbg_inc(&dbg_dwc3_data.idx); write_unlock_irqrestore(&dbg_dwc3_data.lck, flags); if (dbg_dwc3_data.tty != 0) pr_notice("%s\t? %02X %-7.7s %4i ?\t%s\n", get_timestamp(tbuf), ep_num, name, status, extra); } /** * dbg_done: prints a DONE event * @addr: endpoint address * @td: transfer descriptor * @status: status */ void dbg_done(u8 ep_num, const u32 count, int status) { char msg[DBG_DATA_MSG]; if (!allow_dbg_print(ep_num)) return; scnprintf(msg, sizeof(msg), "%d", count); dbg_print(ep_num, "DONE", status, msg); } /** * dbg_event: prints a generic event * @addr: endpoint address * @name: event name * @status: status */ void dbg_event(u8 ep_num, const char *name, int status) { if (!allow_dbg_print(ep_num)) return; if (name != NULL) dbg_print(ep_num, name, status, ""); } /* * dbg_queue: prints a QUEUE event * @addr: endpoint address * @req: USB request * @status: status */ void dbg_queue(u8 ep_num, const struct usb_request *req, int status) { char msg[DBG_DATA_MSG]; if (!allow_dbg_print(ep_num)) return; if (req != NULL) { scnprintf(msg, sizeof(msg), "%d %d", !req->no_interrupt, req->length); dbg_print(ep_num, "QUEUE", status, msg); } } /** * dbg_setup: prints a SETUP event * @addr: endpoint address * @req: setup request */ void dbg_setup(u8 ep_num, const struct usb_ctrlrequest *req) { char msg[DBG_DATA_MSG]; if (!allow_dbg_print(ep_num)) return; if (req != NULL) { scnprintf(msg, sizeof(msg), "%02X %02X %04X %04X %d", req->bRequestType, req->bRequest, le16_to_cpu(req->wValue), le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength)); dbg_print(ep_num, "SETUP", 0, msg); } } /** * dbg_print_reg: prints a reg value * @name: reg name * @reg: reg value to be printed */ void dbg_print_reg(const char *name, int reg) { unsigned long flags; write_lock_irqsave(&dbg_dwc3_data.lck, flags); scnprintf(dbg_dwc3_data.buf[dbg_dwc3_data.idx], DBG_DATA_MSG, "%s = 0x%08x\n", name, reg); dbg_inc(&dbg_dwc3_data.idx); write_unlock_irqrestore(&dbg_dwc3_data.lck, flags); if (dbg_dwc3_data.tty != 0) 
pr_notice("%s = 0x%08x\n", name, reg); } /** * store_events: configure if events are going to be also printed to console * */ static ssize_t dwc3_store_events(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned tty; if (buf == NULL) { pr_err("[%s] EINVAL\n", __func__); goto done; } if (sscanf(buf, "%u", &tty) != 1 || tty > 1) { pr_err("<1|0>: enable|disable console log\n"); goto done; } dbg_dwc3_data.tty = tty; pr_info("tty = %u", dbg_dwc3_data.tty); done: return count; } static int dwc3_gadget_data_events_show(struct seq_file *s, void *unused) { unsigned long flags; unsigned i; read_lock_irqsave(&dbg_dwc3_data.lck, flags); i = dbg_dwc3_data.idx; if (strnlen(dbg_dwc3_data.buf[i], DBG_DATA_MSG)) seq_printf(s, "%s\n", dbg_dwc3_data.buf[i]); for (dbg_inc(&i); i != dbg_dwc3_data.idx; dbg_inc(&i)) { if (!strnlen(dbg_dwc3_data.buf[i], DBG_DATA_MSG)) continue; seq_printf(s, "%s\n", dbg_dwc3_data.buf[i]); } read_unlock_irqrestore(&dbg_dwc3_data.lck, flags); return 0; } static int dwc3_gadget_data_events_open(struct inode *inode, struct file *f) { return single_open(f, dwc3_gadget_data_events_show, inode->i_private); } const struct file_operations dwc3_gadget_dbg_data_fops = { .open = dwc3_gadget_data_events_open, .read = seq_read, .write = dwc3_store_events, .llseek = seq_lseek, .release = single_release, }; int __devinit dwc3_debugfs_init(struct dwc3 *dwc) { struct dentry *root; struct dentry *file; int ret; root = debugfs_create_dir(dev_name(dwc->dev), NULL); if (!root) { ret = -ENOMEM; goto err0; } dwc->root = root; file = debugfs_create_file("regdump", S_IRUGO, root, dwc, &dwc3_regdump_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root, dwc, &dwc3_mode_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root, dwc, &dwc3_testmode_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, dwc, &dwc3_link_state_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("trbs", S_IRUGO | S_IWUSR, root, dwc, &dwc3_ep_trb_list_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("requests", S_IRUGO | S_IWUSR, root, dwc, &dwc3_ep_req_list_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("queued_reqs", S_IRUGO | S_IWUSR, root, dwc, &dwc3_ep_req_queued_fops); if (!file) { ret = -ENOMEM; goto err1; } file = debugfs_create_file("events", S_IRUGO | S_IWUSR, root, dwc, &dwc3_gadget_dbg_data_fops); if (!file) { ret = -ENOMEM; goto err1; } return 0; err1: debugfs_remove_recursive(root); err0: return ret; } void __devexit dwc3_debugfs_exit(struct dwc3 *dwc) { debugfs_remove_recursive(dwc->root); dwc->root = NULL; }
gpl-2.0
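Every attribute in the dwc3 debugfs code above follows the same seq_file pattern: a *_show() callback, a single_open() wrapper, a file_operations table wired to seq_read/seq_lseek/single_release, and a debugfs_create_file() call in dwc3_debugfs_init(). The sketch below distills that pattern into a minimal, self-contained form; struct foo, foo_debugfs_init() and the "state" attribute are hypothetical names used only for illustration, while the debugfs and seq_file calls are the standard kernel APIs the driver above already relies on.

/*
 * Minimal sketch of the seq_file + debugfs pattern used above.
 * "struct foo", foo_debugfs_init() and the "state" attribute are
 * hypothetical; debugfs_create_dir/file(), single_open(), seq_read()
 * and friends are the real kernel interfaces.
 */
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>

struct foo {
	struct dentry *root;	/* debugfs directory for this device */
	int state;		/* some driver state worth exposing */
};

static int foo_state_show(struct seq_file *s, void *unused)
{
	struct foo *foo = s->private;	/* passed in by single_open() */

	seq_printf(s, "state: %d\n", foo->state);
	return 0;
}

static int foo_state_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is the data pointer given to debugfs_create_file() */
	return single_open(file, foo_state_show, inode->i_private);
}

static const struct file_operations foo_state_fops = {
	.open		= foo_state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int foo_debugfs_init(struct foo *foo, const char *name)
{
	foo->root = debugfs_create_dir(name, NULL);
	if (!foo->root)
		return -ENOMEM;

	if (!debugfs_create_file("state", S_IRUGO, foo->root, foo,
				 &foo_state_fops)) {
		debugfs_remove_recursive(foo->root);
		return -ENOMEM;
	}
	return 0;
}

Reading the resulting file under /sys/kernel/debug/<name>/state then simply runs foo_state_show() with the struct foo pointer that was handed to debugfs_create_file(), which is exactly how regdump, link_state, trbs and the other dwc3 attributes work.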
techno/linux-stable-nvmswap
arch/hexagon/kernel/irq_cpu.c
1782
2813
/*
 * First-level interrupt controller model for Hexagon.
 *
 * Copyright (c) 2010-2011 Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/hexagon_vm.h>

static void mask_irq(struct irq_data *data)
{
	__vmintop_locdis((long) data->irq);
}

static void mask_irq_num(unsigned int irq)
{
	__vmintop_locdis((long) irq);
}

static void unmask_irq(struct irq_data *data)
{
	__vmintop_locen((long) data->irq);
}

/* This is actually all we need for handle_fasteoi_irq */
static void eoi_irq(struct irq_data *data)
{
	__vmintop_globen((long) data->irq);
}

/* Power management wake call. We don't need this, however,
 * if this is absent, then an -ENXIO error is returned to the
 * msm_serial driver, and it fails to correctly initialize.
 * This is a bug in the msm_serial driver, but, for now, we
 * work around it here, by providing this bogus handler.
 * XXX FIXME!!! remove this when msm_serial is fixed.
 */
static int set_wake(struct irq_data *data, unsigned int on)
{
	return 0;
}

static struct irq_chip hexagon_irq_chip = {
	.name		= "HEXAGON",
	.irq_mask	= mask_irq,
	.irq_unmask	= unmask_irq,
	.irq_set_wake	= set_wake,
	.irq_eoi	= eoi_irq
};

/**
 * The hexagon core comes with a first-level interrupt controller
 * with 32 total possible interrupts. When the core is embedded
 * into different systems/platforms, it is typically wrapped by
 * macro cells that provide one or more second-level interrupt
 * controllers that are cascaded into one or more of the first-level
 * interrupts handled here. The precise wiring of these other
 * irqs varies from platform to platform, and they are set up &
 * configured in the platform-specific files.
 *
 * The first-level interrupt controller is wrapped by the VM, which
 * virtualizes the interrupt controller for us. It provides a very
 * simple, fast & efficient API, and so the fasteoi handler is
 * appropriate for this case.
 */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < HEXAGON_CPUINTS; irq++) {
		mask_irq_num(irq);
		irq_set_chip_and_handler(irq, &hexagon_irq_chip,
					 handle_fasteoi_irq);
	}
}
gpl-2.0
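init_IRQ() above only installs hexagon_irq_chip and the fasteoi flow handler; drivers (and the cascaded second-level controllers mentioned in the comment) then claim individual lines through the normal request_irq() path. The fragment below is a minimal sketch of that consumer side under stated assumptions: the IRQ number, device name and probe/ISR functions are hypothetical, and only request_irq() plus the irqreturn_t handler prototype are the real kernel API.

/*
 * Sketch of how platform code would consume one of the 32 first-level
 * lines that init_IRQ() above wires to hexagon_irq_chip. DEMO_DEV_IRQ,
 * demo_dev_isr() and demo_dev_probe() are hypothetical names.
 */
#include <linux/interrupt.h>
#include <linux/errno.h>

#define DEMO_DEV_IRQ	16	/* hypothetical first-level interrupt line */

static irqreturn_t demo_dev_isr(int irq, void *dev_id)
{
	/* Handle the device here; the EOI back to the VM is issued by
	 * eoi_irq() through the fasteoi flow handler installed above. */
	return IRQ_HANDLED;
}

static int demo_dev_probe(void *dev)
{
	int ret;

	/* Enabling the line via request_irq() ends up unmasking it
	 * through hexagon_irq_chip (unmask_irq() above). */
	ret = request_irq(DEMO_DEV_IRQ, demo_dev_isr, 0, "demo-dev", dev);
	if (ret)
		return ret;

	return 0;
}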
samm-git/alcatel_ot993D_kernel
drivers/staging/ath6kl/reorder/rcv_aggr.c
2806
19908
/* * * Copyright (c) 2010 Atheros Communications Inc. * All rights reserved. * * // // Permission to use, copy, modify, and/or distribute this software for any // purpose with or without fee is hereby granted, provided that the above // copyright notice and this permission notice appear in all copies. // // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. // // * */ #include <a_config.h> #include <athdefs.h> #include <a_osapi.h> #include <a_debug.h> #include "pkt_log.h" #include "aggr_recv_api.h" #include "aggr_rx_internal.h" #include "wmi.h" extern int wmi_dot3_2_dix(void *osbuf); static void aggr_slice_amsdu(struct aggr_info *p_aggr, struct rxtid *rxtid, void **osbuf); static void aggr_timeout(unsigned long arg); static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid, u16 seq_no, u8 order); static void aggr_dispatch_frames(struct aggr_info *p_aggr, A_NETBUF_QUEUE_T *q); static void * aggr_get_osbuf(struct aggr_info *p_aggr); void * aggr_init(ALLOC_NETBUFS netbuf_allocator) { struct aggr_info *p_aggr = NULL; struct rxtid *rxtid; u8 i; int status = 0; A_PRINTF("In aggr_init..\n"); do { p_aggr = A_MALLOC(sizeof(struct aggr_info)); if(!p_aggr) { A_PRINTF("Failed to allocate memory for aggr_node\n"); status = A_ERROR; break; } /* Init timer and data structures */ A_MEMZERO(p_aggr, sizeof(struct aggr_info)); p_aggr->aggr_sz = AGGR_SZ_DEFAULT; A_INIT_TIMER(&p_aggr->timer, aggr_timeout, p_aggr); p_aggr->timerScheduled = false; A_NETBUF_QUEUE_INIT(&p_aggr->freeQ); p_aggr->netbuf_allocator = netbuf_allocator; p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS); for(i = 0; i < NUM_OF_TIDS; i++) { rxtid = AGGR_GET_RXTID(p_aggr, i); rxtid->aggr = false; rxtid->progress = false; rxtid->timerMon = false; A_NETBUF_QUEUE_INIT(&rxtid->q); A_MUTEX_INIT(&rxtid->lock); } }while(false); A_PRINTF("going out of aggr_init..status %s\n", (status == 0) ? "OK":"Error"); if (status) { /* Cleanup */ aggr_module_destroy(p_aggr); } return ((status == 0) ? 
p_aggr : NULL); } /* utility function to clear rx hold_q for a tid */ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid) { struct rxtid *rxtid; struct rxtid_stats *stats; A_ASSERT(tid < NUM_OF_TIDS && p_aggr); rxtid = AGGR_GET_RXTID(p_aggr, tid); stats = AGGR_GET_RXTID_STATS(p_aggr, tid); if(rxtid->aggr) { aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO); } rxtid->aggr = false; rxtid->progress = false; rxtid->timerMon = false; rxtid->win_sz = 0; rxtid->seq_next = 0; rxtid->hold_q_sz = 0; if(rxtid->hold_q) { kfree(rxtid->hold_q); rxtid->hold_q = NULL; } A_MEMZERO(stats, sizeof(struct rxtid_stats)); } void aggr_module_destroy(void *cntxt) { struct aggr_info *p_aggr = (struct aggr_info *)cntxt; struct rxtid *rxtid; u8 i, k; A_PRINTF("%s(): aggr = %p\n",_A_FUNCNAME_, p_aggr); A_ASSERT(p_aggr); if(p_aggr) { if(p_aggr->timerScheduled) { A_UNTIMEOUT(&p_aggr->timer); p_aggr->timerScheduled = false; } for(i = 0; i < NUM_OF_TIDS; i++) { rxtid = AGGR_GET_RXTID(p_aggr, i); /* Free the hold q contents and hold_q*/ if(rxtid->hold_q) { for(k = 0; k< rxtid->hold_q_sz; k++) { if(rxtid->hold_q[k].osbuf) { A_NETBUF_FREE(rxtid->hold_q[k].osbuf); } } kfree(rxtid->hold_q); } /* Free the dispatch q contents*/ while(A_NETBUF_QUEUE_SIZE(&rxtid->q)) { A_NETBUF_FREE(A_NETBUF_DEQUEUE(&rxtid->q)); } if (A_IS_MUTEX_VALID(&rxtid->lock)) { A_MUTEX_DELETE(&rxtid->lock); } } /* free the freeQ and its contents*/ while(A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) { A_NETBUF_FREE(A_NETBUF_DEQUEUE(&p_aggr->freeQ)); } kfree(p_aggr); } A_PRINTF("out aggr_module_destroy\n"); } void aggr_register_rx_dispatcher(void *cntxt, void * dev, RX_CALLBACK fn) { struct aggr_info *p_aggr = (struct aggr_info *)cntxt; A_ASSERT(p_aggr && fn && dev); p_aggr->rx_fn = fn; p_aggr->dev = dev; } void aggr_process_bar(void *cntxt, u8 tid, u16 seq_no) { struct aggr_info *p_aggr = (struct aggr_info *)cntxt; struct rxtid_stats *stats; A_ASSERT(p_aggr); stats = AGGR_GET_RXTID_STATS(p_aggr, tid); stats->num_bar++; aggr_deque_frms(p_aggr, tid, seq_no, ALL_SEQNO); } void aggr_recv_addba_req_evt(void *cntxt, u8 tid, u16 seq_no, u8 win_sz) { struct aggr_info *p_aggr = (struct aggr_info *)cntxt; struct rxtid *rxtid; struct rxtid_stats *stats; A_ASSERT(p_aggr); rxtid = AGGR_GET_RXTID(p_aggr, tid); stats = AGGR_GET_RXTID_STATS(p_aggr, tid); A_PRINTF("%s(): win_sz = %d aggr %d\n", _A_FUNCNAME_, win_sz, rxtid->aggr); if(win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX) { A_PRINTF("win_sz %d, tid %d\n", win_sz, tid); } if(rxtid->aggr) { /* Just go and deliver all the frames up from this * queue, as if we got DELBA and re-initialize the queue */ aggr_delete_tid_state(p_aggr, tid); } rxtid->seq_next = seq_no; /* create these queues, only upon receiving of ADDBA for a * tid, reducing memory requirement */ rxtid->hold_q = A_MALLOC(HOLD_Q_SZ(win_sz)); if((rxtid->hold_q == NULL)) { A_PRINTF("Failed to allocate memory, tid = %d\n", tid); A_ASSERT(0); } A_MEMZERO(rxtid->hold_q, HOLD_Q_SZ(win_sz)); /* Update rxtid for the window sz */ rxtid->win_sz = win_sz; /* hold_q_sz inicates the depth of holding q - which is * a factor of win_sz. Compute once, as it will be used often */ rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz); /* There should be no frames on q - even when second ADDBA comes in. 
* If aggr was previously ON on this tid, we would have cleaned up * the q */ if(A_NETBUF_QUEUE_SIZE(&rxtid->q) != 0) { A_PRINTF("ERROR: Frames still on queue ?\n"); A_ASSERT(0); } rxtid->aggr = true; } void aggr_recv_delba_req_evt(void *cntxt, u8 tid) { struct aggr_info *p_aggr = (struct aggr_info *)cntxt; struct rxtid *rxtid; A_ASSERT(p_aggr); A_PRINTF("%s(): tid %d\n", _A_FUNCNAME_, tid); rxtid = AGGR_GET_RXTID(p_aggr, tid); if(rxtid->aggr) { aggr_delete_tid_state(p_aggr, tid); } } static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid, u16 seq_no, u8 order) { struct rxtid *rxtid; struct osbuf_hold_q *node; u16 idx, idx_end, seq_end; struct rxtid_stats *stats; A_ASSERT(p_aggr); rxtid = AGGR_GET_RXTID(p_aggr, tid); stats = AGGR_GET_RXTID_STATS(p_aggr, tid); /* idx is absolute location for first frame */ idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz); /* idx_end is typically the last possible frame in the window, * but changes to 'the' seq_no, when BAR comes. If seq_no * is non-zero, we will go up to that and stop. * Note: last seq no in current window will occupy the same * index position as index that is just previous to start. * An imp point : if win_sz is 7, for seq_no space of 4095, * then, there would be holes when sequence wrap around occurs. * Target should judiciously choose the win_sz, based on * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz * 2, 4, 8, 16 win_sz works fine). * We must deque from "idx" to "idx_end", including both. */ seq_end = (seq_no) ? seq_no : rxtid->seq_next; idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz); /* Critical section begins */ A_MUTEX_LOCK(&rxtid->lock); do { node = &rxtid->hold_q[idx]; if((order == CONTIGUOUS_SEQNO) && (!node->osbuf)) break; /* chain frames and deliver frames bcos: * 1. either the frames are in order and window is contiguous, OR * 2. we need to deque frames, irrespective of holes */ if(node->osbuf) { if(node->is_amsdu) { aggr_slice_amsdu(p_aggr, rxtid, &node->osbuf); } else { A_NETBUF_ENQUEUE(&rxtid->q, node->osbuf); } node->osbuf = NULL; } else { stats->num_hole++; } /* window is moving */ rxtid->seq_next = IEEE80211_NEXT_SEQ_NO(rxtid->seq_next); idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz); } while(idx != idx_end); /* Critical section ends */ A_MUTEX_UNLOCK(&rxtid->lock); stats->num_delivered += A_NETBUF_QUEUE_SIZE(&rxtid->q); aggr_dispatch_frames(p_aggr, &rxtid->q); } static void * aggr_get_osbuf(struct aggr_info *p_aggr) { void *buf = NULL; /* Starving for buffers? get more from OS * check for low netbuffers( < 1/4 AGGR_NUM_OF_FREE_NETBUFS) : * re-allocate bufs if so * allocate a free buf from freeQ */ if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ) < (AGGR_NUM_OF_FREE_NETBUFS >> 2)) { p_aggr->netbuf_allocator(&p_aggr->freeQ, AGGR_NUM_OF_FREE_NETBUFS); } if (A_NETBUF_QUEUE_SIZE(&p_aggr->freeQ)) { buf = A_NETBUF_DEQUEUE(&p_aggr->freeQ); } return buf; } static void aggr_slice_amsdu(struct aggr_info *p_aggr, struct rxtid *rxtid, void **osbuf) { void *new_buf; u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len; u8 *framep; /* Frame format at this point: * [DIX hdr | 802.3 | 802.3 | ... | 802.3] * * Strip the DIX header. * Iterate through the osbuf and do: * grab a free netbuf from freeQ * find the start and end of a frame * copy it to netbuf(Vista can do better here) * convert all msdu's(802.3) frames to upper layer format - os routine * -for now lets convert from 802.3 to dix * enque this to dispatch q of tid * repeat * free the osbuf - to OS. It's been sliced. 
*/ mac_hdr_len = sizeof(ATH_MAC_HDR); framep = A_NETBUF_DATA(*osbuf) + mac_hdr_len; amsdu_len = A_NETBUF_LEN(*osbuf) - mac_hdr_len; while(amsdu_len > mac_hdr_len) { /* Begin of a 802.3 frame */ payload_8023_len = A_BE2CPU16(((ATH_MAC_HDR *)framep)->typeOrLen); #define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508 #define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46 if(payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN || payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) { A_PRINTF("802.3 AMSDU frame bound check failed. len %d\n", payload_8023_len); break; } frame_8023_len = payload_8023_len + mac_hdr_len; new_buf = aggr_get_osbuf(p_aggr); if(new_buf == NULL) { A_PRINTF("No buffer available \n"); break; } memcpy(A_NETBUF_DATA(new_buf), framep, frame_8023_len); A_NETBUF_PUT(new_buf, frame_8023_len); if (wmi_dot3_2_dix(new_buf) != 0) { A_PRINTF("dot3_2_dix err..\n"); A_NETBUF_FREE(new_buf); break; } A_NETBUF_ENQUEUE(&rxtid->q, new_buf); /* Is this the last subframe within this aggregate ? */ if ((amsdu_len - frame_8023_len) == 0) { break; } /* Add the length of A-MSDU subframe padding bytes - * Round to nearest word. */ frame_8023_len = ((frame_8023_len + 3) & ~3); framep += frame_8023_len; amsdu_len -= frame_8023_len; } A_NETBUF_FREE(*osbuf); *osbuf = NULL; } void aggr_process_recv_frm(void *cntxt, u8 tid, u16 seq_no, bool is_amsdu, void **osbuf) { struct aggr_info *p_aggr = (struct aggr_info *)cntxt; struct rxtid *rxtid; struct rxtid_stats *stats; u16 idx, st, cur, end; u16 *log_idx; struct osbuf_hold_q *node; PACKET_LOG *log; A_ASSERT(p_aggr); A_ASSERT(tid < NUM_OF_TIDS); rxtid = AGGR_GET_RXTID(p_aggr, tid); stats = AGGR_GET_RXTID_STATS(p_aggr, tid); stats->num_into_aggr++; if(!rxtid->aggr) { if(is_amsdu) { aggr_slice_amsdu(p_aggr, rxtid, osbuf); stats->num_amsdu++; aggr_dispatch_frames(p_aggr, &rxtid->q); } return; } /* Check the incoming sequence no, if it's in the window */ st = rxtid->seq_next; cur = seq_no; end = (st + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO; /* Log the pkt info for future analysis */ log = &p_aggr->pkt_log; log_idx = &log->last_idx; log->info[*log_idx].cur = cur; log->info[*log_idx].st = st; log->info[*log_idx].end = end; *log_idx = IEEE80211_NEXT_SEQ_NO(*log_idx); if(((st < end) && (cur < st || cur > end)) || ((st > end) && (cur > end) && (cur < st))) { /* the cur frame is outside the window. Since we know * our target would not do this without reason it must * be assumed that the window has moved for some valid reason. * Therefore, we dequeue all frames and start fresh. 
*/ u16 extended_end; extended_end = (end + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO; if(((end < extended_end) && (cur < end || cur > extended_end)) || ((end > extended_end) && (cur > extended_end) && (cur < end))) { // dequeue all frames in queue and shift window to new frame aggr_deque_frms(p_aggr, tid, 0, ALL_SEQNO); //set window start so that new frame is last frame in window if(cur >= rxtid->hold_q_sz-1) { rxtid->seq_next = cur - (rxtid->hold_q_sz-1); }else{ rxtid->seq_next = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur); } } else { // dequeue only those frames that are outside the new shifted window if(cur >= rxtid->hold_q_sz-1) { st = cur - (rxtid->hold_q_sz-1); }else{ st = IEEE80211_MAX_SEQ_NO - (rxtid->hold_q_sz-2 - cur); } aggr_deque_frms(p_aggr, tid, st, ALL_SEQNO); } stats->num_oow++; } idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz); /*enque the frame, in hold_q */ node = &rxtid->hold_q[idx]; A_MUTEX_LOCK(&rxtid->lock); if(node->osbuf) { /* Is the cur frame duplicate or something beyond our * window(hold_q -> which is 2x, already)? * 1. Duplicate is easy - drop incoming frame. * 2. Not falling in current sliding window. * 2a. is the frame_seq_no preceding current tid_seq_no? * -> drop the frame. perhaps sender did not get our ACK. * this is taken care of above. * 2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ); * -> Taken care of it above, by moving window forward. * */ A_NETBUF_FREE(node->osbuf); stats->num_dups++; } node->osbuf = *osbuf; node->is_amsdu = is_amsdu; node->seq_no = seq_no; if(node->is_amsdu) { stats->num_amsdu++; } else { stats->num_mpdu++; } A_MUTEX_UNLOCK(&rxtid->lock); *osbuf = NULL; aggr_deque_frms(p_aggr, tid, 0, CONTIGUOUS_SEQNO); if(p_aggr->timerScheduled) { rxtid->progress = true; }else{ for(idx=0 ; idx<rxtid->hold_q_sz ; idx++) { if(rxtid->hold_q[idx].osbuf) { /* there is a frame in the queue and no timer so * start a timer to ensure that the frame doesn't remain * stuck forever. */ p_aggr->timerScheduled = true; A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0); rxtid->progress = false; rxtid->timerMon = true; break; } } } } /* * aggr_reset_state -- Called when it is deemed necessary to clear the aggregate * hold Q state. Examples include when a Connect event or disconnect event is * received. */ void aggr_reset_state(void *cntxt) { u8 tid; struct aggr_info *p_aggr = (struct aggr_info *)cntxt; A_ASSERT(p_aggr); for(tid=0 ; tid<NUM_OF_TIDS ; tid++) { aggr_delete_tid_state(p_aggr, tid); } } static void aggr_timeout(unsigned long arg) { u8 i,j; struct aggr_info *p_aggr = (struct aggr_info *)arg; struct rxtid *rxtid; struct rxtid_stats *stats; /* * If the q for which the timer was originally started has * not progressed then it is necessary to dequeue all the * contained frames so that they are not held forever. */ for(i = 0; i < NUM_OF_TIDS; i++) { rxtid = AGGR_GET_RXTID(p_aggr, i); stats = AGGR_GET_RXTID_STATS(p_aggr, i); if(rxtid->aggr == false || rxtid->timerMon == false || rxtid->progress == true) { continue; } // dequeue all frames in for this tid stats->num_timeouts++; A_PRINTF("TO: st %d end %d\n", rxtid->seq_next, ((rxtid->seq_next + rxtid->hold_q_sz-1) & IEEE80211_MAX_SEQ_NO)); aggr_deque_frms(p_aggr, i, 0, ALL_SEQNO); } p_aggr->timerScheduled = false; // determine whether a new timer should be started. 
for(i = 0; i < NUM_OF_TIDS; i++) { rxtid = AGGR_GET_RXTID(p_aggr, i); if(rxtid->aggr == true && rxtid->hold_q) { for(j = 0 ; j < rxtid->hold_q_sz ; j++) { if(rxtid->hold_q[j].osbuf) { p_aggr->timerScheduled = true; rxtid->timerMon = true; rxtid->progress = false; break; } } if(j >= rxtid->hold_q_sz) { rxtid->timerMon = false; } } } if(p_aggr->timerScheduled) { /* Rearm the timer*/ A_TIMEOUT_MS(&p_aggr->timer, AGGR_RX_TIMEOUT, 0); } } static void aggr_dispatch_frames(struct aggr_info *p_aggr, A_NETBUF_QUEUE_T *q) { void *osbuf; while((osbuf = A_NETBUF_DEQUEUE(q))) { p_aggr->rx_fn(p_aggr->dev, osbuf); } } void aggr_dump_stats(void *cntxt, PACKET_LOG **log_buf) { struct aggr_info *p_aggr = (struct aggr_info *)cntxt; struct rxtid *rxtid; struct rxtid_stats *stats; u8 i; *log_buf = &p_aggr->pkt_log; A_PRINTF("\n\n================================================\n"); A_PRINTF("tid: num_into_aggr, dups, oow, mpdu, amsdu, delivered, timeouts, holes, bar, seq_next\n"); for(i = 0; i < NUM_OF_TIDS; i++) { stats = AGGR_GET_RXTID_STATS(p_aggr, i); rxtid = AGGR_GET_RXTID(p_aggr, i); A_PRINTF("%d: %d %d %d %d %d %d %d %d %d : %d\n", i, stats->num_into_aggr, stats->num_dups, stats->num_oow, stats->num_mpdu, stats->num_amsdu, stats->num_delivered, stats->num_timeouts, stats->num_hole, stats->num_bar, rxtid->seq_next); } A_PRINTF("================================================\n\n"); }
gpl-2.0
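The reorder logic in rcv_aggr.c above leans on three macros from aggr_rx_internal.h whose definitions are not shown here: AGGR_WIN_IDX(), TID_WINDOW_SZ() and IEEE80211_NEXT_SEQ_NO(). The stand-alone sketch below uses plausible equivalents (not copied from that header) to make the arithmetic concrete: frames are filed into hold_q[seq % hold_q_sz], the hold queue is sized at twice the ADDBA window, and the mapping only stays consistent across the 4095 -> 0 wrap when hold_q_sz divides the 4096-value sequence space, which is why the in-code comment restricts win_sz to powers of two.

/*
 * Illustrative user-space sketch of the sequence-window arithmetic the
 * reorder code above depends on. The macro bodies are assumed
 * equivalents of AGGR_WIN_IDX, TID_WINDOW_SZ and IEEE80211_NEXT_SEQ_NO,
 * shown only to demonstrate the wrap-around behaviour.
 */
#include <stdio.h>

#define SEQ_MASK		4095		/* 12-bit 802.11 sequence space */
#define NEXT_SEQ_NO(seq)	(((seq) + 1) & SEQ_MASK)
#define WIN_IDX(seq, holdq_sz)	((seq) % (holdq_sz))
#define HOLDQ_SZ(win_sz)	((win_sz) * 2)	/* hold queue is 2x the BA window */

int main(void)
{
	unsigned int win_sz = 8;			/* window size from ADDBA */
	unsigned int holdq_sz = HOLDQ_SZ(win_sz);
	unsigned int seq = 4090;			/* close to the wrap point */
	int i;

	/* Frames land in hold_q[seq % holdq_sz]; the index keeps cycling
	 * through the same holdq_sz slots while seq wraps at 4095. */
	for (i = 0; i < 12; i++) {
		printf("seq %4u -> hold_q idx %u\n", seq, WIN_IDX(seq, holdq_sz));
		seq = NEXT_SEQ_NO(seq);
	}
	return 0;
}

With win_sz = 8 the 16-slot hold queue divides 4096 evenly, so slot assignments stay aligned across the wrap; with win_sz = 7 they would shift after the wrap, producing the "holes" the driver comment warns about.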
cherifyass/s4-gpe-kernel
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
3318
99874
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Toplevel file. Relies on dhd_linux.c to send commands to the dongle. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/netdevice.h> #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/ieee80211.h> #include <linux/uaccess.h> #include <net/cfg80211.h> #include <brcmu_utils.h> #include <defs.h> #include <brcmu_wifi.h> #include "dhd.h" #include "wl_cfg80211.h" #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255}; static u32 brcmf_dbg_level = WL_DBG_ERR; static void brcmf_set_drvdata(struct brcmf_cfg80211_dev *dev, void *data) { dev->driver_data = data; } static void *brcmf_get_drvdata(struct brcmf_cfg80211_dev *dev) { void *data = NULL; if (dev) data = dev->driver_data; return data; } static struct brcmf_cfg80211_priv *brcmf_priv_get(struct brcmf_cfg80211_dev *cfg_dev) { struct brcmf_cfg80211_iface *ci = brcmf_get_drvdata(cfg_dev); return ci->cfg_priv; } static bool check_sys_up(struct wiphy *wiphy) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) { WL_INFO("device is not ready : status (%d)\n", (int)cfg_priv->status); return false; } return true; } #define CHAN2G(_channel, _freq, _flags) { \ .band = IEEE80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define CHAN5G(_channel, _flags) { \ .band = IEEE80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) #define RATETAB_ENT(_rateid, _flags) \ { \ .bitrate = RATE_TO_BASE100KBPS(_rateid), \ .hw_value = (_rateid), \ .flags = (_flags), \ } static struct ieee80211_rate __wl_rates[] = { RATETAB_ENT(BRCM_RATE_1M, 0), RATETAB_ENT(BRCM_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(BRCM_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(BRCM_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(BRCM_RATE_6M, 0), RATETAB_ENT(BRCM_RATE_9M, 0), RATETAB_ENT(BRCM_RATE_12M, 0), RATETAB_ENT(BRCM_RATE_18M, 0), RATETAB_ENT(BRCM_RATE_24M, 0), RATETAB_ENT(BRCM_RATE_36M, 0), RATETAB_ENT(BRCM_RATE_48M, 0), RATETAB_ENT(BRCM_RATE_54M, 0), }; #define wl_a_rates (__wl_rates + 4) #define wl_a_rates_size 8 #define wl_g_rates (__wl_rates + 0) #define wl_g_rates_size 12 static struct ieee80211_channel __wl_2ghz_channels[] = { CHAN2G(1, 2412, 0), CHAN2G(2, 2417, 0), CHAN2G(3, 2422, 0), CHAN2G(4, 2427, 0), CHAN2G(5, 2432, 0), CHAN2G(6, 2437, 0), CHAN2G(7, 2442, 0), 
CHAN2G(8, 2447, 0), CHAN2G(9, 2452, 0), CHAN2G(10, 2457, 0), CHAN2G(11, 2462, 0), CHAN2G(12, 2467, 0), CHAN2G(13, 2472, 0), CHAN2G(14, 2484, 0), }; static struct ieee80211_channel __wl_5ghz_a_channels[] = { CHAN5G(34, 0), CHAN5G(36, 0), CHAN5G(38, 0), CHAN5G(40, 0), CHAN5G(42, 0), CHAN5G(44, 0), CHAN5G(46, 0), CHAN5G(48, 0), CHAN5G(52, 0), CHAN5G(56, 0), CHAN5G(60, 0), CHAN5G(64, 0), CHAN5G(100, 0), CHAN5G(104, 0), CHAN5G(108, 0), CHAN5G(112, 0), CHAN5G(116, 0), CHAN5G(120, 0), CHAN5G(124, 0), CHAN5G(128, 0), CHAN5G(132, 0), CHAN5G(136, 0), CHAN5G(140, 0), CHAN5G(149, 0), CHAN5G(153, 0), CHAN5G(157, 0), CHAN5G(161, 0), CHAN5G(165, 0), CHAN5G(184, 0), CHAN5G(188, 0), CHAN5G(192, 0), CHAN5G(196, 0), CHAN5G(200, 0), CHAN5G(204, 0), CHAN5G(208, 0), CHAN5G(212, 0), CHAN5G(216, 0), }; static struct ieee80211_channel __wl_5ghz_n_channels[] = { CHAN5G(32, 0), CHAN5G(34, 0), CHAN5G(36, 0), CHAN5G(38, 0), CHAN5G(40, 0), CHAN5G(42, 0), CHAN5G(44, 0), CHAN5G(46, 0), CHAN5G(48, 0), CHAN5G(50, 0), CHAN5G(52, 0), CHAN5G(54, 0), CHAN5G(56, 0), CHAN5G(58, 0), CHAN5G(60, 0), CHAN5G(62, 0), CHAN5G(64, 0), CHAN5G(66, 0), CHAN5G(68, 0), CHAN5G(70, 0), CHAN5G(72, 0), CHAN5G(74, 0), CHAN5G(76, 0), CHAN5G(78, 0), CHAN5G(80, 0), CHAN5G(82, 0), CHAN5G(84, 0), CHAN5G(86, 0), CHAN5G(88, 0), CHAN5G(90, 0), CHAN5G(92, 0), CHAN5G(94, 0), CHAN5G(96, 0), CHAN5G(98, 0), CHAN5G(100, 0), CHAN5G(102, 0), CHAN5G(104, 0), CHAN5G(106, 0), CHAN5G(108, 0), CHAN5G(110, 0), CHAN5G(112, 0), CHAN5G(114, 0), CHAN5G(116, 0), CHAN5G(118, 0), CHAN5G(120, 0), CHAN5G(122, 0), CHAN5G(124, 0), CHAN5G(126, 0), CHAN5G(128, 0), CHAN5G(130, 0), CHAN5G(132, 0), CHAN5G(134, 0), CHAN5G(136, 0), CHAN5G(138, 0), CHAN5G(140, 0), CHAN5G(142, 0), CHAN5G(144, 0), CHAN5G(145, 0), CHAN5G(146, 0), CHAN5G(147, 0), CHAN5G(148, 0), CHAN5G(149, 0), CHAN5G(150, 0), CHAN5G(151, 0), CHAN5G(152, 0), CHAN5G(153, 0), CHAN5G(154, 0), CHAN5G(155, 0), CHAN5G(156, 0), CHAN5G(157, 0), CHAN5G(158, 0), CHAN5G(159, 0), CHAN5G(160, 0), CHAN5G(161, 0), CHAN5G(162, 0), CHAN5G(163, 0), CHAN5G(164, 0), CHAN5G(165, 0), CHAN5G(166, 0), CHAN5G(168, 0), CHAN5G(170, 0), CHAN5G(172, 0), CHAN5G(174, 0), CHAN5G(176, 0), CHAN5G(178, 0), CHAN5G(180, 0), CHAN5G(182, 0), CHAN5G(184, 0), CHAN5G(186, 0), CHAN5G(188, 0), CHAN5G(190, 0), CHAN5G(192, 0), CHAN5G(194, 0), CHAN5G(196, 0), CHAN5G(198, 0), CHAN5G(200, 0), CHAN5G(202, 0), CHAN5G(204, 0), CHAN5G(206, 0), CHAN5G(208, 0), CHAN5G(210, 0), CHAN5G(212, 0), CHAN5G(214, 0), CHAN5G(216, 0), CHAN5G(218, 0), CHAN5G(220, 0), CHAN5G(222, 0), CHAN5G(224, 0), CHAN5G(226, 0), CHAN5G(228, 0), }; static struct ieee80211_supported_band __wl_band_2ghz = { .band = IEEE80211_BAND_2GHZ, .channels = __wl_2ghz_channels, .n_channels = ARRAY_SIZE(__wl_2ghz_channels), .bitrates = wl_g_rates, .n_bitrates = wl_g_rates_size, }; static struct ieee80211_supported_band __wl_band_5ghz_a = { .band = IEEE80211_BAND_5GHZ, .channels = __wl_5ghz_a_channels, .n_channels = ARRAY_SIZE(__wl_5ghz_a_channels), .bitrates = wl_a_rates, .n_bitrates = wl_a_rates_size, }; static struct ieee80211_supported_band __wl_band_5ghz_n = { .band = IEEE80211_BAND_5GHZ, .channels = __wl_5ghz_n_channels, .n_channels = ARRAY_SIZE(__wl_5ghz_n_channels), .bitrates = wl_a_rates, .n_bitrates = wl_a_rates_size, }; static const u32 __wl_cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_AES_CMAC, }; /* tag_ID/length/value_buffer tuple */ struct brcmf_tlv { u8 id; u8 len; u8 data[1]; }; /* Quarter dBm units to mW * Table 
starts at QDBM_OFFSET, so the first entry is mW for qdBm=153 * Table is offset so the last entry is largest mW value that fits in * a u16. */ #define QDBM_OFFSET 153 /* Offset for first entry */ #define QDBM_TABLE_LEN 40 /* Table size */ /* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 */ #define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ /* Largest mW value that will round down to the last table entry, * QDBM_OFFSET + QDBM_TABLE_LEN-1. * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + * mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. */ #define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { /* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ /* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, /* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, /* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, /* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, /* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 }; static u16 brcmf_qdbm_to_mw(u8 qdbm) { uint factor = 1; int idx = qdbm - QDBM_OFFSET; if (idx >= QDBM_TABLE_LEN) /* clamp to max u16 mW value */ return 0xFFFF; /* scale the qdBm index up to the range of the table 0-40 * where an offset of 40 qdBm equals a factor of 10 mW. */ while (idx < 0) { idx += 40; factor *= 10; } /* return the mW value scaled down to the correct factor of 10, * adding in factor/2 to get proper rounding. */ return (nqdBm_to_mW_map[idx] + factor / 2) / factor; } static u8 brcmf_mw_to_qdbm(u16 mw) { u8 qdbm; int offset; uint mw_uint = mw; uint boundary; /* handle boundary case */ if (mw_uint <= 1) return 0; offset = QDBM_OFFSET; /* move mw into the range of the table */ while (mw_uint < QDBM_TABLE_LOW_BOUND) { mw_uint *= 10; offset -= 40; } for (qdbm = 0; qdbm < QDBM_TABLE_LEN - 1; qdbm++) { boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm + 1] - nqdBm_to_mW_map[qdbm]) / 2; if (mw_uint < boundary) break; } qdbm += (u8) offset; return qdbm; } /* function for reading/writing a single u32 from/to the dongle */ static int brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par) { int err; __le32 par_le = cpu_to_le32(*par); err = brcmf_exec_dcmd(ndev, cmd, &par_le, sizeof(__le32)); *par = le32_to_cpu(par_le); return err; } static void convert_key_from_CPU(struct brcmf_wsec_key *key, struct brcmf_wsec_key_le *key_le) { key_le->index = cpu_to_le32(key->index); key_le->len = cpu_to_le32(key->len); key_le->algo = cpu_to_le32(key->algo); key_le->flags = cpu_to_le32(key->flags); key_le->rxiv.hi = cpu_to_le32(key->rxiv.hi); key_le->rxiv.lo = cpu_to_le16(key->rxiv.lo); key_le->iv_initialized = cpu_to_le32(key->iv_initialized); memcpy(key_le->data, key->data, sizeof(key->data)); memcpy(key_le->ea, key->ea, sizeof(key->ea)); } static int send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key) { int err; struct brcmf_wsec_key_le key_le; convert_key_from_CPU(key, &key_le); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, sizeof(key_le)); if (err) WL_ERR("WLC_SET_KEY error (%d)\n", err); return err; } static s32 brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct wireless_dev *wdev; s32 infra = 0; s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; switch (type) { case NL80211_IFTYPE_MONITOR: case 
NL80211_IFTYPE_WDS: WL_ERR("type (%d) : currently we do not support this type\n", type); return -EOPNOTSUPP; case NL80211_IFTYPE_ADHOC: cfg_priv->conf->mode = WL_MODE_IBSS; infra = 0; break; case NL80211_IFTYPE_STATION: cfg_priv->conf->mode = WL_MODE_BSS; infra = 1; break; default: err = -EINVAL; goto done; } err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra); if (err) { WL_ERR("WLC_SET_INFRA error (%d)\n", err); err = -EAGAIN; } else { wdev = ndev->ieee80211_ptr; wdev->iftype = type; } WL_INFO("IF Type = %s\n", (cfg_priv->conf->mode == WL_MODE_IBSS) ? "Adhoc" : "Infra"); done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_dev_intvar_set(struct net_device *ndev, s8 *name, s32 val) { s8 buf[BRCMF_DCMD_SMLEN]; u32 len; s32 err = 0; __le32 val_le; val_le = cpu_to_le32(val); len = brcmf_c_mkiovar(name, (char *)(&val_le), sizeof(val_le), buf, sizeof(buf)); BUG_ON(!len); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len); if (err) WL_ERR("error (%d)\n", err); return err; } static s32 brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval) { union { s8 buf[BRCMF_DCMD_SMLEN]; __le32 val; } var; u32 len; u32 data_null; s32 err = 0; len = brcmf_c_mkiovar(name, (char *)(&data_null), 0, (char *)(&var), sizeof(var.buf)); BUG_ON(!len); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, &var, len); if (err) WL_ERR("error (%d)\n", err); *retval = le32_to_cpu(var.val); return err; } static void brcmf_set_mpc(struct net_device *ndev, int mpc) { s32 err = 0; struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { err = brcmf_dev_intvar_set(ndev, "mpc", mpc); if (err) { WL_ERR("fail to set mpc\n"); return; } WL_INFO("MPC : %d\n", mpc); } } static void wl_iscan_prep(struct brcmf_scan_params_le *params_le, struct brcmf_ssid *ssid) { memcpy(params_le->bssid, ether_bcast, ETH_ALEN); params_le->bss_type = DOT11_BSSTYPE_ANY; params_le->scan_type = 0; params_le->channel_num = 0; params_le->nprobes = cpu_to_le32(-1); params_le->active_time = cpu_to_le32(-1); params_le->passive_time = cpu_to_le32(-1); params_le->home_time = cpu_to_le32(-1); if (ssid && ssid->SSID_len) memcpy(&params_le->ssid_le, ssid, sizeof(struct brcmf_ssid)); } static s32 brcmf_dev_iovar_setbuf(struct net_device *ndev, s8 * iovar, void *param, s32 paramlen, void *bufptr, s32 buflen) { s32 iolen; iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen); BUG_ON(!iolen); return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, bufptr, iolen); } static s32 brcmf_dev_iovar_getbuf(struct net_device *ndev, s8 * iovar, void *param, s32 paramlen, void *bufptr, s32 buflen) { s32 iolen; iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen); BUG_ON(!iolen); return brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, bufptr, buflen); } static s32 brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan, struct brcmf_ssid *ssid, u16 action) { s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE + offsetof(struct brcmf_iscan_params_le, params_le); struct brcmf_iscan_params_le *params; s32 err = 0; if (ssid && ssid->SSID_len) params_size += sizeof(struct brcmf_ssid); params = kzalloc(params_size, GFP_KERNEL); if (!params) return -ENOMEM; BUG_ON(params_size >= BRCMF_DCMD_SMLEN); wl_iscan_prep(&params->params_le, ssid); params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION); params->action = cpu_to_le16(action); params->scan_duration = cpu_to_le16(0); err = brcmf_dev_iovar_setbuf(iscan->ndev, "iscan", params, params_size, iscan->dcmd_buf, BRCMF_DCMD_SMLEN); if (err) { if (err == -EBUSY) WL_INFO("system 
busy : iscan canceled\n"); else WL_ERR("error (%d)\n", err); } kfree(params); return err; } static s32 brcmf_do_iscan(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); struct net_device *ndev = cfg_to_ndev(cfg_priv); struct brcmf_ssid ssid; __le32 passive_scan; s32 err = 0; /* Broadcast scan by default */ memset(&ssid, 0, sizeof(ssid)); iscan->state = WL_ISCAN_STATE_SCANING; passive_scan = cfg_priv->active_scan ? 0 : cpu_to_le32(1); err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_SET_PASSIVE_SCAN, &passive_scan, sizeof(passive_scan)); if (err) { WL_ERR("error (%d)\n", err); return err; } brcmf_set_mpc(ndev, 0); cfg_priv->iscan_kickstart = true; err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START); if (err) { brcmf_set_mpc(ndev, 1); cfg_priv->iscan_kickstart = false; return err; } mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000); iscan->timer_on = 1; return err; } static s32 __brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request, struct cfg80211_ssid *this_ssid) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); struct cfg80211_ssid *ssids; struct brcmf_cfg80211_scan_req *sr = cfg_priv->scan_req_int; __le32 passive_scan; bool iscan_req; bool spec_scan; s32 err = 0; u32 SSID_len; if (test_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { WL_ERR("Scanning already : status (%lu)\n", cfg_priv->status); return -EAGAIN; } if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status)) { WL_ERR("Scanning being aborted : status (%lu)\n", cfg_priv->status); return -EAGAIN; } if (test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) { WL_ERR("Connecting : status (%lu)\n", cfg_priv->status); return -EAGAIN; } iscan_req = false; spec_scan = false; if (request) { /* scan bss */ ssids = request->ssids; if (cfg_priv->iscan_on && (!ssids || !ssids->ssid_len)) iscan_req = true; } else { /* scan in ibss */ /* we don't do iscan in ibss */ ssids = this_ssid; } cfg_priv->scan_request = request; set_bit(WL_STATUS_SCANNING, &cfg_priv->status); if (iscan_req) { err = brcmf_do_iscan(cfg_priv); if (!err) return err; else goto scan_out; } else { WL_SCAN("ssid \"%s\", ssid_len (%d)\n", ssids->ssid, ssids->ssid_len); memset(&sr->ssid_le, 0, sizeof(sr->ssid_le)); SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len); sr->ssid_le.SSID_len = cpu_to_le32(0); if (SSID_len) { memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len); sr->ssid_le.SSID_len = cpu_to_le32(SSID_len); spec_scan = true; } else { WL_SCAN("Broadcast scan\n"); } passive_scan = cfg_priv->active_scan ? 
0 : cpu_to_le32(1); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN, &passive_scan, sizeof(passive_scan)); if (err) { WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err); goto scan_out; } brcmf_set_mpc(ndev, 0); err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le, sizeof(sr->ssid_le)); if (err) { if (err == -EBUSY) WL_INFO("system busy : scan for \"%s\" " "canceled\n", sr->ssid_le.SSID); else WL_ERR("WLC_SCAN error (%d)\n", err); brcmf_set_mpc(ndev, 1); goto scan_out; } } return 0; scan_out: clear_bit(WL_STATUS_SCANNING, &cfg_priv->status); cfg_priv->scan_request = NULL; return err; } static s32 brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_scan_request *request) { s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; err = __brcmf_cfg80211_scan(wiphy, ndev, request, NULL); if (err) WL_ERR("scan error (%d)\n", err); WL_TRACE("Exit\n"); return err; } static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold) { s32 err = 0; err = brcmf_dev_intvar_set(ndev, "rtsthresh", rts_threshold); if (err) WL_ERR("Error (%d)\n", err); return err; } static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold) { s32 err = 0; err = brcmf_dev_intvar_set(ndev, "fragthresh", frag_threshold); if (err) WL_ERR("Error (%d)\n", err); return err; } static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l) { s32 err = 0; u32 cmd = (l ? BRCM_SET_LRL : BRCM_SET_SRL); err = brcmf_exec_dcmd_u32(ndev, cmd, &retry); if (err) { WL_ERR("cmd (%d) , error (%d)\n", cmd, err); return err; } return err; } static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct net_device *ndev = cfg_to_ndev(cfg_priv); s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; if (changed & WIPHY_PARAM_RTS_THRESHOLD && (cfg_priv->conf->rts_threshold != wiphy->rts_threshold)) { cfg_priv->conf->rts_threshold = wiphy->rts_threshold; err = brcmf_set_rts(ndev, cfg_priv->conf->rts_threshold); if (!err) goto done; } if (changed & WIPHY_PARAM_FRAG_THRESHOLD && (cfg_priv->conf->frag_threshold != wiphy->frag_threshold)) { cfg_priv->conf->frag_threshold = wiphy->frag_threshold; err = brcmf_set_frag(ndev, cfg_priv->conf->frag_threshold); if (!err) goto done; } if (changed & WIPHY_PARAM_RETRY_LONG && (cfg_priv->conf->retry_long != wiphy->retry_long)) { cfg_priv->conf->retry_long = wiphy->retry_long; err = brcmf_set_retry(ndev, cfg_priv->conf->retry_long, true); if (!err) goto done; } if (changed & WIPHY_PARAM_RETRY_SHORT && (cfg_priv->conf->retry_short != wiphy->retry_short)) { cfg_priv->conf->retry_short = wiphy->retry_short; err = brcmf_set_retry(ndev, cfg_priv->conf->retry_short, false); if (!err) goto done; } done: WL_TRACE("Exit\n"); return err; } static void *brcmf_read_prof(struct brcmf_cfg80211_priv *cfg_priv, s32 item) { switch (item) { case WL_PROF_SEC: return &cfg_priv->profile->sec; case WL_PROF_BSSID: return &cfg_priv->profile->bssid; case WL_PROF_SSID: return &cfg_priv->profile->ssid; } WL_ERR("invalid item (%d)\n", item); return NULL; } static s32 brcmf_update_prof(struct brcmf_cfg80211_priv *cfg_priv, const struct brcmf_event_msg *e, void *data, s32 item) { s32 err = 0; struct brcmf_ssid *ssid; switch (item) { case WL_PROF_SSID: ssid = (struct brcmf_ssid *) data; memset(cfg_priv->profile->ssid.SSID, 0, sizeof(cfg_priv->profile->ssid.SSID)); memcpy(cfg_priv->profile->ssid.SSID, ssid->SSID, ssid->SSID_len); cfg_priv->profile->ssid.SSID_len = 
ssid->SSID_len; break; case WL_PROF_BSSID: if (data) memcpy(cfg_priv->profile->bssid, data, ETH_ALEN); else memset(cfg_priv->profile->bssid, 0, ETH_ALEN); break; case WL_PROF_SEC: memcpy(&cfg_priv->profile->sec, data, sizeof(cfg_priv->profile->sec)); break; case WL_PROF_BEACONINT: cfg_priv->profile->beacon_interval = *(u16 *)data; break; case WL_PROF_DTIMPERIOD: cfg_priv->profile->dtim_period = *(u8 *)data; break; default: WL_ERR("unsupported item (%d)\n", item); err = -EOPNOTSUPP; break; } return err; } static void brcmf_init_prof(struct brcmf_cfg80211_profile *prof) { memset(prof, 0, sizeof(*prof)); } static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params, size_t *join_params_size) { u16 chanspec = 0; if (ch != 0) { if (ch <= CH_MAX_2G_CHANNEL) chanspec |= WL_CHANSPEC_BAND_2G; else chanspec |= WL_CHANSPEC_BAND_5G; chanspec |= WL_CHANSPEC_BW_20; chanspec |= WL_CHANSPEC_CTL_SB_NONE; *join_params_size += BRCMF_ASSOC_PARAMS_FIXED_SIZE + sizeof(u16); chanspec |= (ch & WL_CHANSPEC_CHAN_MASK); join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec); join_params->params_le.chanspec_num = cpu_to_le32(1); WL_CONN("join_params->params.chanspec_list[0]= %#X," "channel %d, chanspec %#X\n", chanspec, ch, chanspec); } } static void brcmf_link_down(struct brcmf_cfg80211_priv *cfg_priv) { struct net_device *ndev = NULL; s32 err = 0; WL_TRACE("Enter\n"); if (cfg_priv->link_up) { ndev = cfg_to_ndev(cfg_priv); WL_INFO("Call WLC_DISASSOC to stop excess roaming\n "); err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0); if (err) WL_ERR("WLC_DISASSOC failed (%d)\n", err); cfg_priv->link_up = false; } WL_TRACE("Exit\n"); } static s32 brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_ibss_params *params) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct brcmf_join_params join_params; size_t join_params_size = 0; s32 err = 0; s32 wsec = 0; s32 bcnprd; struct brcmf_ssid ssid; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; if (params->ssid) WL_CONN("SSID: %s\n", params->ssid); else { WL_CONN("SSID: NULL, Not supported\n"); return -EOPNOTSUPP; } set_bit(WL_STATUS_CONNECTING, &cfg_priv->status); if (params->bssid) WL_CONN("BSSID: %02X %02X %02X %02X %02X %02X\n", params->bssid[0], params->bssid[1], params->bssid[2], params->bssid[3], params->bssid[4], params->bssid[5]); else WL_CONN("No BSSID specified\n"); if (params->channel) WL_CONN("channel: %d\n", params->channel->center_freq); else WL_CONN("no channel specified\n"); if (params->channel_fixed) WL_CONN("fixed channel required\n"); else WL_CONN("no fixed channel required\n"); if (params->ie && params->ie_len) WL_CONN("ie len: %d\n", params->ie_len); else WL_CONN("no ie specified\n"); if (params->beacon_interval) WL_CONN("beacon interval: %d\n", params->beacon_interval); else WL_CONN("no beacon interval specified\n"); if (params->basic_rates) WL_CONN("basic rates: %08X\n", params->basic_rates); else WL_CONN("no basic rates specified\n"); if (params->privacy) WL_CONN("privacy required\n"); else WL_CONN("no privacy required\n"); /* Configure Privacy for starter */ if (params->privacy) wsec |= WEP_ENABLED; err = brcmf_dev_intvar_set(ndev, "wsec", wsec); if (err) { WL_ERR("wsec failed (%d)\n", err); goto done; } /* Configure Beacon Interval for starter */ if (params->beacon_interval) bcnprd = params->beacon_interval; else bcnprd = 100; err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_BCNPRD, &bcnprd); if (err) { WL_ERR("WLC_SET_BCNPRD failed (%d)\n", err); goto done; } /* 
Configure required join parameter */ memset(&join_params, 0, sizeof(struct brcmf_join_params)); /* SSID */ ssid.SSID_len = min_t(u32, params->ssid_len, 32); memcpy(ssid.SSID, params->ssid, ssid.SSID_len); memcpy(join_params.ssid_le.SSID, params->ssid, ssid.SSID_len); join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); join_params_size = sizeof(join_params.ssid_le); brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID); /* BSSID */ if (params->bssid) { memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN); join_params_size = sizeof(join_params.ssid_le) + BRCMF_ASSOC_PARAMS_FIXED_SIZE; } else { memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN); } brcmf_update_prof(cfg_priv, NULL, &join_params.params_le.bssid, WL_PROF_BSSID); /* Channel */ if (params->channel) { u32 target_channel; cfg_priv->channel = ieee80211_frequency_to_channel( params->channel->center_freq); if (params->channel_fixed) { /* adding chanspec */ brcmf_ch_to_chanspec(cfg_priv->channel, &join_params, &join_params_size); } /* set channel for starter */ target_channel = cfg_priv->channel; err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL, &target_channel); if (err) { WL_ERR("WLC_SET_CHANNEL failed (%d)\n", err); goto done; } } else cfg_priv->channel = 0; cfg_priv->ibss_starter = false; err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params, join_params_size); if (err) { WL_ERR("WLC_SET_SSID failed (%d)\n", err); goto done; } done: if (err) clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; brcmf_link_down(cfg_priv); WL_TRACE("Exit\n"); return err; } static s32 brcmf_set_wpa_version(struct net_device *ndev, struct cfg80211_connect_params *sme) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); struct brcmf_cfg80211_security *sec; s32 val = 0; s32 err = 0; if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; else val = WPA_AUTH_DISABLED; WL_CONN("setting wpa_auth to 0x%0x\n", val); err = brcmf_dev_intvar_set(ndev, "wpa_auth", val); if (err) { WL_ERR("set wpa_auth failed (%d)\n", err); return err; } sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); sec->wpa_versions = sme->crypto.wpa_versions; return err; } static s32 brcmf_set_auth_type(struct net_device *ndev, struct cfg80211_connect_params *sme) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); struct brcmf_cfg80211_security *sec; s32 val = 0; s32 err = 0; switch (sme->auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: val = 0; WL_CONN("open system\n"); break; case NL80211_AUTHTYPE_SHARED_KEY: val = 1; WL_CONN("shared key\n"); break; case NL80211_AUTHTYPE_AUTOMATIC: val = 2; WL_CONN("automatic\n"); break; case NL80211_AUTHTYPE_NETWORK_EAP: WL_CONN("network eap\n"); default: val = 2; WL_ERR("invalid auth type (%d)\n", sme->auth_type); break; } err = brcmf_dev_intvar_set(ndev, "auth", val); if (err) { WL_ERR("set auth failed (%d)\n", err); return err; } sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); sec->auth_type = sme->auth_type; return err; } static s32 brcmf_set_set_cipher(struct net_device *ndev, struct cfg80211_connect_params *sme) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); struct brcmf_cfg80211_security *sec; s32 pval = 0; s32 
gval = 0; s32 err = 0; if (sme->crypto.n_ciphers_pairwise) { switch (sme->crypto.ciphers_pairwise[0]) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: pval = WEP_ENABLED; break; case WLAN_CIPHER_SUITE_TKIP: pval = TKIP_ENABLED; break; case WLAN_CIPHER_SUITE_CCMP: pval = AES_ENABLED; break; case WLAN_CIPHER_SUITE_AES_CMAC: pval = AES_ENABLED; break; default: WL_ERR("invalid cipher pairwise (%d)\n", sme->crypto.ciphers_pairwise[0]); return -EINVAL; } } if (sme->crypto.cipher_group) { switch (sme->crypto.cipher_group) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: gval = WEP_ENABLED; break; case WLAN_CIPHER_SUITE_TKIP: gval = TKIP_ENABLED; break; case WLAN_CIPHER_SUITE_CCMP: gval = AES_ENABLED; break; case WLAN_CIPHER_SUITE_AES_CMAC: gval = AES_ENABLED; break; default: WL_ERR("invalid cipher group (%d)\n", sme->crypto.cipher_group); return -EINVAL; } } WL_CONN("pval (%d) gval (%d)\n", pval, gval); err = brcmf_dev_intvar_set(ndev, "wsec", pval | gval); if (err) { WL_ERR("error (%d)\n", err); return err; } sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0]; sec->cipher_group = sme->crypto.cipher_group; return err; } static s32 brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); struct brcmf_cfg80211_security *sec; s32 val = 0; s32 err = 0; if (sme->crypto.n_akm_suites) { err = brcmf_dev_intvar_get(ndev, "wpa_auth", &val); if (err) { WL_ERR("could not get wpa_auth (%d)\n", err); return err; } if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { switch (sme->crypto.akm_suites[0]) { case WLAN_AKM_SUITE_8021X: val = WPA_AUTH_UNSPECIFIED; break; case WLAN_AKM_SUITE_PSK: val = WPA_AUTH_PSK; break; default: WL_ERR("invalid cipher group (%d)\n", sme->crypto.cipher_group); return -EINVAL; } } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { switch (sme->crypto.akm_suites[0]) { case WLAN_AKM_SUITE_8021X: val = WPA2_AUTH_UNSPECIFIED; break; case WLAN_AKM_SUITE_PSK: val = WPA2_AUTH_PSK; break; default: WL_ERR("invalid cipher group (%d)\n", sme->crypto.cipher_group); return -EINVAL; } } WL_CONN("setting wpa_auth to %d\n", val); err = brcmf_dev_intvar_set(ndev, "wpa_auth", val); if (err) { WL_ERR("could not set wpa_auth (%d)\n", err); return err; } } sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); sec->wpa_auth = sme->crypto.akm_suites[0]; return err; } static s32 brcmf_set_wep_sharedkey(struct net_device *ndev, struct cfg80211_connect_params *sme) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); struct brcmf_cfg80211_security *sec; struct brcmf_wsec_key key; s32 val; s32 err = 0; WL_CONN("key len (%d)\n", sme->key_len); if (sme->key_len == 0) return 0; sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n", sec->wpa_versions, sec->cipher_pairwise); if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) return 0; if (sec->cipher_pairwise & (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104)) { memset(&key, 0, sizeof(key)); key.len = (u32) sme->key_len; key.index = (u32) sme->key_idx; if (key.len > sizeof(key.data)) { WL_ERR("Too long key length (%u)\n", key.len); return -EINVAL; } memcpy(key.data, sme->key, key.len); key.flags = BRCMF_PRIMARY_KEY; switch (sec->cipher_pairwise) { case WLAN_CIPHER_SUITE_WEP40: key.algo = CRYPTO_ALGO_WEP1; break; case WLAN_CIPHER_SUITE_WEP104: key.algo = CRYPTO_ALGO_WEP128; break; default: WL_ERR("Invalid algorithm (%d)\n", 
sme->crypto.ciphers_pairwise[0]); return -EINVAL; } /* Set the new key/index */ WL_CONN("key length (%d) key index (%d) algo (%d)\n", key.len, key.index, key.algo); WL_CONN("key \"%s\"\n", key.data); err = send_key_to_dongle(ndev, &key); if (err) return err; if (sec->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) { WL_CONN("set auth_type to shared key\n"); val = 1; /* shared key */ err = brcmf_dev_intvar_set(ndev, "auth", val); if (err) { WL_ERR("set auth failed (%d)\n", err); return err; } } } return err; } static s32 brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_connect_params *sme) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct ieee80211_channel *chan = sme->channel; struct brcmf_join_params join_params; size_t join_params_size; struct brcmf_ssid ssid; s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; if (!sme->ssid) { WL_ERR("Invalid ssid\n"); return -EOPNOTSUPP; } set_bit(WL_STATUS_CONNECTING, &cfg_priv->status); if (chan) { cfg_priv->channel = ieee80211_frequency_to_channel(chan->center_freq); WL_CONN("channel (%d), center_req (%d)\n", cfg_priv->channel, chan->center_freq); } else cfg_priv->channel = 0; WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len); err = brcmf_set_wpa_version(ndev, sme); if (err) { WL_ERR("wl_set_wpa_version failed (%d)\n", err); goto done; } err = brcmf_set_auth_type(ndev, sme); if (err) { WL_ERR("wl_set_auth_type failed (%d)\n", err); goto done; } err = brcmf_set_set_cipher(ndev, sme); if (err) { WL_ERR("wl_set_set_cipher failed (%d)\n", err); goto done; } err = brcmf_set_key_mgmt(ndev, sme); if (err) { WL_ERR("wl_set_key_mgmt failed (%d)\n", err); goto done; } err = brcmf_set_wep_sharedkey(ndev, sme); if (err) { WL_ERR("brcmf_set_wep_sharedkey failed (%d)\n", err); goto done; } memset(&join_params, 0, sizeof(join_params)); join_params_size = sizeof(join_params.ssid_le); ssid.SSID_len = min_t(u32, sizeof(ssid.SSID), (u32)sme->ssid_len); memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid.SSID_len); memcpy(&ssid.SSID, sme->ssid, ssid.SSID_len); join_params.ssid_le.SSID_len = cpu_to_le32(ssid.SSID_len); brcmf_update_prof(cfg_priv, NULL, &ssid, WL_PROF_SSID); memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN); if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN) WL_CONN("ssid \"%s\", len (%d)\n", ssid.SSID, ssid.SSID_len); brcmf_ch_to_chanspec(cfg_priv->channel, &join_params, &join_params_size); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params, join_params_size); if (err) WL_ERR("WLC_SET_SSID failed (%d)\n", err); done: if (err) clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, u16 reason_code) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct brcmf_scb_val_le scbval; s32 err = 0; WL_TRACE("Enter. 
Reason code = %d\n", reason_code); if (!check_sys_up(wiphy)) return -EIO; clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status); memcpy(&scbval.ea, brcmf_read_prof(cfg_priv, WL_PROF_BSSID), ETH_ALEN); scbval.val = cpu_to_le32(reason_code); err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval, sizeof(struct brcmf_scb_val_le)); if (err) WL_ERR("error (%d)\n", err); cfg_priv->link_up = false; WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type, s32 mbm) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct net_device *ndev = cfg_to_ndev(cfg_priv); u16 txpwrmw; s32 err = 0; s32 disable = 0; s32 dbm = MBM_TO_DBM(mbm); WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; switch (type) { case NL80211_TX_POWER_AUTOMATIC: break; case NL80211_TX_POWER_LIMITED: case NL80211_TX_POWER_FIXED: if (dbm < 0) { WL_ERR("TX_POWER_FIXED - dbm is negative\n"); err = -EINVAL; goto done; } break; } /* Make sure radio is off or on as far as software is concerned */ disable = WL_RADIO_SW_DISABLE << 16; err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_RADIO, &disable); if (err) WL_ERR("WLC_SET_RADIO error (%d)\n", err); if (dbm > 0xffff) txpwrmw = 0xffff; else txpwrmw = (u16) dbm; err = brcmf_dev_intvar_set(ndev, "qtxpower", (s32) (brcmf_mw_to_qdbm(txpwrmw))); if (err) WL_ERR("qtxpower error (%d)\n", err); cfg_priv->conf->tx_power = dbm; done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct net_device *ndev = cfg_to_ndev(cfg_priv); s32 txpwrdbm; u8 result; s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; err = brcmf_dev_intvar_get(ndev, "qtxpower", &txpwrdbm); if (err) { WL_ERR("error (%d)\n", err); goto done; } result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE); *dbm = (s32) brcmf_qdbm_to_mw(result); done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, bool unicast, bool multicast) { u32 index; u32 wsec; s32 err = 0; WL_TRACE("Enter\n"); WL_CONN("key index (%d)\n", key_idx); if (!check_sys_up(wiphy)) return -EIO; err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec); if (err) { WL_ERR("WLC_GET_WSEC error (%d)\n", err); goto done; } if (wsec & WEP_ENABLED) { /* Just select a new current key */ index = key_idx; err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_KEY_PRIMARY, &index); if (err) WL_ERR("error (%d)\n", err); } done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, const u8 *mac_addr, struct key_params *params) { struct brcmf_wsec_key key; struct brcmf_wsec_key_le key_le; s32 err = 0; memset(&key, 0, sizeof(key)); key.index = (u32) key_idx; /* Instead of bcast for ea address for default wep keys, driver needs it to be Null */ if (!is_multicast_ether_addr(mac_addr)) memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN); key.len = (u32) params->key_len; /* check for key index change */ if (key.len == 0) { /* key delete */ err = send_key_to_dongle(ndev, &key); if (err) return err; } else { if (key.len > sizeof(key.data)) { WL_ERR("Invalid key length (%d)\n", key.len); return -EINVAL; } WL_CONN("Setting the key index %d\n", key.index); memcpy(key.data, params->key, key.len); if (params->cipher == WLAN_CIPHER_SUITE_TKIP) { u8 keybuf[8]; memcpy(keybuf, &key.data[24], sizeof(keybuf)); memcpy(&key.data[24], &key.data[16], 
sizeof(keybuf)); memcpy(&key.data[16], keybuf, sizeof(keybuf)); } /* if IW_ENCODE_EXT_RX_SEQ_VALID set */ if (params->seq && params->seq_len == 6) { /* rx iv */ u8 *ivptr; ivptr = (u8 *) params->seq; key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | (ivptr[3] << 8) | ivptr[2]; key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; key.iv_initialized = true; } switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: key.algo = CRYPTO_ALGO_WEP1; WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); break; case WLAN_CIPHER_SUITE_WEP104: key.algo = CRYPTO_ALGO_WEP128; WL_CONN("WLAN_CIPHER_SUITE_WEP104\n"); break; case WLAN_CIPHER_SUITE_TKIP: key.algo = CRYPTO_ALGO_TKIP; WL_CONN("WLAN_CIPHER_SUITE_TKIP\n"); break; case WLAN_CIPHER_SUITE_AES_CMAC: key.algo = CRYPTO_ALGO_AES_CCM; WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n"); break; case WLAN_CIPHER_SUITE_CCMP: key.algo = CRYPTO_ALGO_AES_CCM; WL_CONN("WLAN_CIPHER_SUITE_CCMP\n"); break; default: WL_ERR("Invalid cipher (0x%x)\n", params->cipher); return -EINVAL; } convert_key_from_CPU(&key, &key_le); brcmf_netdev_wait_pend8021x(ndev); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_KEY, &key_le, sizeof(key_le)); if (err) { WL_ERR("WLC_SET_KEY error (%d)\n", err); return err; } } return err; } static s32 brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct brcmf_wsec_key key; s32 val; s32 wsec; s32 err = 0; u8 keybuf[8]; WL_TRACE("Enter\n"); WL_CONN("key index (%d)\n", key_idx); if (!check_sys_up(wiphy)) return -EIO; if (mac_addr) { WL_TRACE("Exit"); return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params); } memset(&key, 0, sizeof(key)); key.len = (u32) params->key_len; key.index = (u32) key_idx; if (key.len > sizeof(key.data)) { WL_ERR("Too long key length (%u)\n", key.len); err = -EINVAL; goto done; } memcpy(key.data, params->key, key.len); key.flags = BRCMF_PRIMARY_KEY; switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: key.algo = CRYPTO_ALGO_WEP1; WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); break; case WLAN_CIPHER_SUITE_WEP104: key.algo = CRYPTO_ALGO_WEP128; WL_CONN("WLAN_CIPHER_SUITE_WEP104\n"); break; case WLAN_CIPHER_SUITE_TKIP: memcpy(keybuf, &key.data[24], sizeof(keybuf)); memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); memcpy(&key.data[16], keybuf, sizeof(keybuf)); key.algo = CRYPTO_ALGO_TKIP; WL_CONN("WLAN_CIPHER_SUITE_TKIP\n"); break; case WLAN_CIPHER_SUITE_AES_CMAC: key.algo = CRYPTO_ALGO_AES_CCM; WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n"); break; case WLAN_CIPHER_SUITE_CCMP: key.algo = CRYPTO_ALGO_AES_CCM; WL_CONN("WLAN_CIPHER_SUITE_CCMP\n"); break; default: WL_ERR("Invalid cipher (0x%x)\n", params->cipher); err = -EINVAL; goto done; } err = send_key_to_dongle(ndev, &key); /* Set the new key/index */ if (err) goto done; val = WEP_ENABLED; err = brcmf_dev_intvar_get(ndev, "wsec", &wsec); if (err) { WL_ERR("get wsec error (%d)\n", err); goto done; } wsec &= ~(WEP_ENABLED); wsec |= val; err = brcmf_dev_intvar_set(ndev, "wsec", wsec); if (err) { WL_ERR("set wsec error (%d)\n", err); goto done; } val = 1; /* assume shared key. 
otherwise 0 */ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val); if (err) WL_ERR("WLC_SET_AUTH error (%d)\n", err); done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, bool pairwise, const u8 *mac_addr) { struct brcmf_wsec_key key; s32 err = 0; s32 val; s32 wsec; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; memset(&key, 0, sizeof(key)); key.index = (u32) key_idx; key.flags = BRCMF_PRIMARY_KEY; key.algo = CRYPTO_ALGO_OFF; WL_CONN("key index (%d)\n", key_idx); /* Set the new key/index */ err = send_key_to_dongle(ndev, &key); if (err) { if (err == -EINVAL) { if (key.index >= DOT11_MAX_DEFAULT_KEYS) /* we ignore this key index in this case */ WL_ERR("invalid key index (%d)\n", key_idx); } /* Ignore this error, may happen during DISASSOC */ err = -EAGAIN; goto done; } val = 0; err = brcmf_dev_intvar_get(ndev, "wsec", &wsec); if (err) { WL_ERR("get wsec error (%d)\n", err); /* Ignore this error, may happen during DISASSOC */ err = -EAGAIN; goto done; } wsec &= ~(WEP_ENABLED); wsec |= val; err = brcmf_dev_intvar_set(ndev, "wsec", wsec); if (err) { WL_ERR("set wsec error (%d)\n", err); /* Ignore this error, may happen during DISASSOC */ err = -EAGAIN; goto done; } val = 0; /* assume open key. otherwise 1 */ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AUTH, &val); if (err) { WL_ERR("WLC_SET_AUTH error (%d)\n", err); /* Ignore this error, may happen during DISASSOC */ err = -EAGAIN; } done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback) (void *cookie, struct key_params * params)) { struct key_params params; struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct brcmf_cfg80211_security *sec; s32 wsec; s32 err = 0; WL_TRACE("Enter\n"); WL_CONN("key index (%d)\n", key_idx); if (!check_sys_up(wiphy)) return -EIO; memset(&params, 0, sizeof(params)); err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_WSEC, &wsec); if (err) { WL_ERR("WLC_GET_WSEC error (%d)\n", err); /* Ignore this error, may happen during DISASSOC */ err = -EAGAIN; goto done; } switch (wsec) { case WEP_ENABLED: sec = brcmf_read_prof(cfg_priv, WL_PROF_SEC); if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { params.cipher = WLAN_CIPHER_SUITE_WEP40; WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) { params.cipher = WLAN_CIPHER_SUITE_WEP104; WL_CONN("WLAN_CIPHER_SUITE_WEP104\n"); } break; case TKIP_ENABLED: params.cipher = WLAN_CIPHER_SUITE_TKIP; WL_CONN("WLAN_CIPHER_SUITE_TKIP\n"); break; case AES_ENABLED: params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n"); break; default: WL_ERR("Invalid algo (0x%x)\n", wsec); err = -EINVAL; goto done; } callback(cookie, &params); done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx) { WL_INFO("Not supported\n"); return -EOPNOTSUPP; } static s32 brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, u8 *mac, struct station_info *sinfo) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct brcmf_scb_val_le scb_val; int rssi; s32 rate; s32 err = 0; u8 *bssid = brcmf_read_prof(cfg_priv, WL_PROF_BSSID); WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; if (memcmp(mac, bssid, ETH_ALEN)) { WL_ERR("Wrong Mac address cfg_mac-%X:%X:%X:%X:%X:%X" 
"wl_bssid-%X:%X:%X:%X:%X:%X\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]); err = -ENOENT; goto done; } /* Report the current tx rate */ err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate); if (err) { WL_ERR("Could not get rate (%d)\n", err); } else { sinfo->filled |= STATION_INFO_TX_BITRATE; sinfo->txrate.legacy = rate * 5; WL_CONN("Rate %d Mbps\n", rate / 2); } if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) { scb_val.val = cpu_to_le32(0); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val, sizeof(struct brcmf_scb_val_le)); if (err) WL_ERR("Could not get rssi (%d)\n", err); rssi = le32_to_cpu(scb_val.val); sinfo->filled |= STATION_INFO_SIGNAL; sinfo->signal = rssi; WL_CONN("RSSI %d dBm\n", rssi); } done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, bool enabled, s32 timeout) { s32 pm; s32 err = 0; struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); WL_TRACE("Enter\n"); /* * Powersave enable/disable request is coming from the * cfg80211 even before the interface is up. In that * scenario, driver will be storing the power save * preference in cfg_priv struct to apply this to * FW later while initializing the dongle */ cfg_priv->pwr_save = enabled; if (!test_bit(WL_STATUS_READY, &cfg_priv->status)) { WL_INFO("Device is not ready," "storing the value in cfg_priv struct\n"); goto done; } pm = enabled ? PM_FAST : PM_OFF; WL_INFO("power save %s\n", (pm ? "enabled" : "disabled")); err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &pm); if (err) { if (err == -ENODEV) WL_ERR("net_device is not ready yet\n"); else WL_ERR("error (%d)\n", err); } done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev, const u8 *addr, const struct cfg80211_bitrate_mask *mask) { struct brcm_rateset_le rateset_le; s32 rate; s32 val; s32 err_bg; s32 err_a; u32 legacy; s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; /* addr param is always NULL. ignore it */ /* Get current rateset */ err = brcmf_exec_dcmd(ndev, BRCM_GET_CURR_RATESET, &rateset_le, sizeof(rateset_le)); if (err) { WL_ERR("could not get current rateset (%d)\n", err); goto done; } legacy = ffs(mask->control[IEEE80211_BAND_2GHZ].legacy & 0xFFFF); if (!legacy) legacy = ffs(mask->control[IEEE80211_BAND_5GHZ].legacy & 0xFFFF); val = wl_g_rates[legacy - 1].bitrate * 100000; if (val < le32_to_cpu(rateset_le.count)) /* Select rate by rateset index */ rate = rateset_le.rates[val] & 0x7f; else /* Specified rate in bps */ rate = val / 500000; WL_CONN("rate %d mbps\n", rate / 2); /* * * Set rate override, * Since the is a/b/g-blind, both a/bg_rate are enforced. */ err_bg = brcmf_dev_intvar_set(ndev, "bg_rate", rate); err_a = brcmf_dev_intvar_set(ndev, "a_rate", rate); if (err_bg && err_a) { WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a); err = err_bg | err_a; } done: WL_TRACE("Exit\n"); return err; } static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv, struct brcmf_bss_info_le *bi) { struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); struct ieee80211_channel *notify_channel; struct cfg80211_bss *bss; struct ieee80211_supported_band *band; s32 err = 0; u16 channel; u32 freq; u16 notify_capability; u16 notify_interval; u8 *notify_ie; size_t notify_ielen; s32 notify_signal; if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) { WL_ERR("Bss info is larger than buffer. 
Discarding\n"); return 0; } channel = bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); if (channel <= CH_MAX_2G_CHANNEL) band = wiphy->bands[IEEE80211_BAND_2GHZ]; else band = wiphy->bands[IEEE80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(channel, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); notify_capability = le16_to_cpu(bi->capability); notify_interval = le16_to_cpu(bi->beacon_period); notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset); notify_ielen = le32_to_cpu(bi->ie_length); notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; WL_CONN("bssid: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", bi->BSSID[0], bi->BSSID[1], bi->BSSID[2], bi->BSSID[3], bi->BSSID[4], bi->BSSID[5]); WL_CONN("Channel: %d(%d)\n", channel, freq); WL_CONN("Capability: %X\n", notify_capability); WL_CONN("Beacon interval: %d\n", notify_interval); WL_CONN("Signal: %d\n", notify_signal); bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID, 0, notify_capability, notify_interval, notify_ie, notify_ielen, notify_signal, GFP_KERNEL); if (!bss) return -ENOMEM; cfg80211_put_bss(bss); return err; } static struct brcmf_bss_info_le * next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss) { if (bss == NULL) return list->bss_info_le; return (struct brcmf_bss_info_le *)((unsigned long)bss + le32_to_cpu(bss->length)); } static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_scan_results *bss_list; struct brcmf_bss_info_le *bi = NULL; /* must be initialized */ s32 err = 0; int i; bss_list = cfg_priv->bss_list; if (bss_list->version != BRCMF_BSS_INFO_VERSION) { WL_ERR("Version %d != WL_BSS_INFO_VERSION\n", bss_list->version); return -EOPNOTSUPP; } WL_SCAN("scanned AP count (%d)\n", bss_list->count); for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) { bi = next_bss_le(bss_list, bi); err = brcmf_inform_single_bss(cfg_priv, bi); if (err) break; } return err; } static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const u8 *bssid) { struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); struct ieee80211_channel *notify_channel; struct brcmf_bss_info_le *bi = NULL; struct ieee80211_supported_band *band; struct cfg80211_bss *bss; u8 *buf = NULL; s32 err = 0; u16 channel; u32 freq; u16 notify_capability; u16 notify_interval; u8 *notify_ie; size_t notify_ielen; s32 notify_signal; WL_TRACE("Enter\n"); buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); if (buf == NULL) { err = -ENOMEM; goto CleanUp; } *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX); if (err) { WL_ERR("WLC_GET_BSS_INFO failed: %d\n", err); goto CleanUp; } bi = (struct brcmf_bss_info_le *)(buf + 4); channel = bi->ctl_ch ? 
bi->ctl_ch : CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec)); if (channel <= CH_MAX_2G_CHANNEL) band = wiphy->bands[IEEE80211_BAND_2GHZ]; else band = wiphy->bands[IEEE80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(channel, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); notify_capability = le16_to_cpu(bi->capability); notify_interval = le16_to_cpu(bi->beacon_period); notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset); notify_ielen = le32_to_cpu(bi->ie_length); notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; WL_CONN("channel: %d(%d)\n", channel, freq); WL_CONN("capability: %X\n", notify_capability); WL_CONN("beacon interval: %d\n", notify_interval); WL_CONN("signal: %d\n", notify_signal); bss = cfg80211_inform_bss(wiphy, notify_channel, bssid, 0, notify_capability, notify_interval, notify_ie, notify_ielen, notify_signal, GFP_KERNEL); if (!bss) { err = -ENOMEM; goto CleanUp; } cfg80211_put_bss(bss); CleanUp: kfree(buf); WL_TRACE("Exit\n"); return err; } static bool brcmf_is_ibssmode(struct brcmf_cfg80211_priv *cfg_priv) { return cfg_priv->conf->mode == WL_MODE_IBSS; } /* * Traverse a string of 1-byte tag/1-byte length/variable-length value * triples, returning a pointer to the substring whose first element * matches tag */ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key) { struct brcmf_tlv *elt; int totlen; elt = (struct brcmf_tlv *) buf; totlen = buflen; /* find tagged parameter */ while (totlen >= 2) { int len = elt->len; /* validate remaining totlen */ if ((elt->id == key) && (totlen >= (len + 2))) return elt; elt = (struct brcmf_tlv *) ((u8 *) elt + (len + 2)); totlen -= (len + 2); } return NULL; } static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_bss_info_le *bi; struct brcmf_ssid *ssid; struct brcmf_tlv *tim; u16 beacon_interval; u8 dtim_period; size_t ie_len; u8 *ie; s32 err = 0; WL_TRACE("Enter\n"); if (brcmf_is_ibssmode(cfg_priv)) return err; ssid = (struct brcmf_ssid *)brcmf_read_prof(cfg_priv, WL_PROF_SSID); *(__le32 *)cfg_priv->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX); err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCMF_C_GET_BSS_INFO, cfg_priv->extra_buf, WL_EXTRA_BUF_MAX); if (err) { WL_ERR("Could not get bss info %d\n", err); goto update_bss_info_out; } bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4); err = brcmf_inform_single_bss(cfg_priv, bi); if (err) goto update_bss_info_out; ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset); ie_len = le32_to_cpu(bi->ie_length); beacon_interval = le16_to_cpu(bi->beacon_period); tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM); if (tim) dtim_period = tim->data[1]; else { /* * active scan was done so we could not get dtim * information out of probe response. * so we speficially query dtim information to dongle. 
*/ u32 var; err = brcmf_dev_intvar_get(cfg_to_ndev(cfg_priv), "dtim_assoc", &var); if (err) { WL_ERR("wl dtim_assoc failed (%d)\n", err); goto update_bss_info_out; } dtim_period = (u8)var; } brcmf_update_prof(cfg_priv, NULL, &beacon_interval, WL_PROF_BEACONINT); brcmf_update_prof(cfg_priv, NULL, &dtim_period, WL_PROF_DTIMPERIOD); update_bss_info_out: WL_TRACE("Exit"); return err; } static void brcmf_term_iscan(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); struct brcmf_ssid ssid; if (cfg_priv->iscan_on) { iscan->state = WL_ISCAN_STATE_IDLE; if (iscan->timer_on) { del_timer_sync(&iscan->timer); iscan->timer_on = 0; } cancel_work_sync(&iscan->work); /* Abort iscan running in FW */ memset(&ssid, 0, sizeof(ssid)); brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT); } } static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan, bool aborted) { struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan); struct net_device *ndev = cfg_to_ndev(cfg_priv); if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { WL_ERR("Scan complete while device not scanning\n"); return; } if (cfg_priv->scan_request) { WL_SCAN("ISCAN Completed scan: %s\n", aborted ? "Aborted" : "Done"); cfg80211_scan_done(cfg_priv->scan_request, aborted); brcmf_set_mpc(ndev, 1); cfg_priv->scan_request = NULL; } cfg_priv->iscan_kickstart = false; } static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan) { if (iscan->state != WL_ISCAN_STATE_IDLE) { WL_SCAN("wake up iscan\n"); schedule_work(&iscan->work); return 0; } return -EIO; } static s32 brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status, struct brcmf_scan_results **bss_list) { struct brcmf_iscan_results list; struct brcmf_scan_results *results; struct brcmf_scan_results_le *results_le; struct brcmf_iscan_results *list_buf; s32 err = 0; memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX); list_buf = (struct brcmf_iscan_results *)iscan->scan_buf; results = &list_buf->results; results_le = &list_buf->results_le; results->buflen = BRCMF_ISCAN_RESULTS_FIXED_SIZE; results->version = 0; results->count = 0; memset(&list, 0, sizeof(list)); list.results_le.buflen = cpu_to_le32(WL_ISCAN_BUF_MAX); err = brcmf_dev_iovar_getbuf(iscan->ndev, "iscanresults", &list, BRCMF_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf, WL_ISCAN_BUF_MAX); if (err) { WL_ERR("error (%d)\n", err); return err; } results->buflen = le32_to_cpu(results_le->buflen); results->version = le32_to_cpu(results_le->version); results->count = le32_to_cpu(results_le->count); WL_SCAN("results->count = %d\n", results_le->count); WL_SCAN("results->buflen = %d\n", results_le->buflen); *status = le32_to_cpu(list_buf->status_le); WL_SCAN("status = %d\n", *status); *bss_list = results; return err; } static s32 brcmf_iscan_done(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; s32 err = 0; iscan->state = WL_ISCAN_STATE_IDLE; brcmf_inform_bss(cfg_priv); brcmf_notify_iscan_complete(iscan, false); return err; } static s32 brcmf_iscan_pending(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; s32 err = 0; /* Reschedule the timer */ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000); iscan->timer_on = 1; return err; } static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; s32 err = 0; brcmf_inform_bss(cfg_priv); brcmf_run_iscan(iscan, NULL, 
BRCMF_SCAN_ACTION_CONTINUE); /* Reschedule the timer */ mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000); iscan->timer_on = 1; return err; } static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_priv->iscan; s32 err = 0; iscan->state = WL_ISCAN_STATE_IDLE; brcmf_notify_iscan_complete(iscan, true); return err; } static void brcmf_cfg80211_iscan_handler(struct work_struct *work) { struct brcmf_cfg80211_iscan_ctrl *iscan = container_of(work, struct brcmf_cfg80211_iscan_ctrl, work); struct brcmf_cfg80211_priv *cfg_priv = iscan_to_cfg(iscan); struct brcmf_cfg80211_iscan_eloop *el = &iscan->el; u32 status = BRCMF_SCAN_RESULTS_PARTIAL; if (iscan->timer_on) { del_timer_sync(&iscan->timer); iscan->timer_on = 0; } if (brcmf_get_iscan_results(iscan, &status, &cfg_priv->bss_list)) { status = BRCMF_SCAN_RESULTS_ABORTED; WL_ERR("Abort iscan\n"); } el->handler[status](cfg_priv); } static void brcmf_iscan_timer(unsigned long data) { struct brcmf_cfg80211_iscan_ctrl *iscan = (struct brcmf_cfg80211_iscan_ctrl *)data; if (iscan) { iscan->timer_on = 0; WL_SCAN("timer expired\n"); brcmf_wakeup_iscan(iscan); } } static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); if (cfg_priv->iscan_on) { iscan->state = WL_ISCAN_STATE_IDLE; INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler); } return 0; } static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el) { memset(el, 0, sizeof(*el)); el->handler[BRCMF_SCAN_RESULTS_SUCCESS] = brcmf_iscan_done; el->handler[BRCMF_SCAN_RESULTS_PARTIAL] = brcmf_iscan_inprogress; el->handler[BRCMF_SCAN_RESULTS_PENDING] = brcmf_iscan_pending; el->handler[BRCMF_SCAN_RESULTS_ABORTED] = brcmf_iscan_aborted; el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted; } static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg_priv); int err = 0; if (cfg_priv->iscan_on) { iscan->ndev = cfg_to_ndev(cfg_priv); brcmf_init_iscan_eloop(&iscan->el); iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS; init_timer(&iscan->timer); iscan->timer.data = (unsigned long) iscan; iscan->timer.function = brcmf_iscan_timer; err = brcmf_invoke_iscan(cfg_priv); if (!err) iscan->data = cfg_priv; } return err; } static __always_inline void brcmf_delay(u32 ms) { if (ms < 1000 / HZ) { cond_resched(); mdelay(ms); } else { msleep(ms); } } static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); /* * Check for WL_STATUS_READY before any function call which * could result is bus access. Don't block the resume for * any driver error conditions */ WL_TRACE("Enter\n"); if (test_bit(WL_STATUS_READY, &cfg_priv->status)) brcmf_invoke_iscan(wiphy_to_cfg(wiphy)); WL_TRACE("Exit\n"); return 0; } static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct net_device *ndev = cfg_to_ndev(cfg_priv); WL_TRACE("Enter\n"); /* * Check for WL_STATUS_READY before any function call which * could result is bus access. 
Don't block the suspend for * any driver error conditions */ /* * While going to suspend if associated with AP disassociate * from AP to save power while system is in suspended state */ if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) || test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) && test_bit(WL_STATUS_READY, &cfg_priv->status)) { WL_INFO("Disassociating from AP" " while entering suspend state\n"); brcmf_link_down(cfg_priv); /* * Make sure WPA_Supplicant receives all the event * generated due to DISASSOC call to the fw to keep * the state fw and WPA_Supplicant state consistent */ brcmf_delay(500); } set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); if (test_bit(WL_STATUS_READY, &cfg_priv->status)) brcmf_term_iscan(cfg_priv); if (cfg_priv->scan_request) { /* Indidate scan abort to cfg80211 layer */ WL_INFO("Terminating scan in progress\n"); cfg80211_scan_done(cfg_priv->scan_request, true); cfg_priv->scan_request = NULL; } clear_bit(WL_STATUS_SCANNING, &cfg_priv->status); clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); /* Turn off watchdog timer */ if (test_bit(WL_STATUS_READY, &cfg_priv->status)) { WL_INFO("Enable MPC\n"); brcmf_set_mpc(ndev, 1); } WL_TRACE("Exit\n"); return 0; } static __used s32 brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); u32 buflen; buflen = brcmf_c_mkiovar(name, buf, len, cfg_priv->dcmd_buf, WL_DCMD_LEN_MAX); BUG_ON(!buflen); return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg_priv->dcmd_buf, buflen); } static s32 brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf, s32 buf_len) { struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); u32 len; s32 err = 0; len = brcmf_c_mkiovar(name, NULL, 0, cfg_priv->dcmd_buf, WL_DCMD_LEN_MAX); BUG_ON(!len); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg_priv->dcmd_buf, WL_DCMD_LEN_MAX); if (err) { WL_ERR("error (%d)\n", err); return err; } memcpy(buf, cfg_priv->dcmd_buf, buf_len); return err; } static __used s32 brcmf_update_pmklist(struct net_device *ndev, struct brcmf_cfg80211_pmk_list *pmk_list, s32 err) { int i, j; int pmkid_len; pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid); WL_CONN("No of elements %d\n", pmkid_len); for (i = 0; i < pmkid_len; i++) { WL_CONN("PMKID[%d]: %pM =\n", i, &pmk_list->pmkids.pmkid[i].BSSID); for (j = 0; j < WLAN_PMKID_LEN; j++) WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]); } if (!err) brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list, sizeof(*pmk_list)); return err; } static s32 brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_pmksa *pmksa) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct pmkid_list *pmkids = &cfg_priv->pmk_list->pmkids; s32 err = 0; int i; int pmkid_len; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; pmkid_len = le32_to_cpu(pmkids->npmkid); for (i = 0; i < pmkid_len; i++) if (!memcmp(pmksa->bssid, pmkids->pmkid[i].BSSID, ETH_ALEN)) break; if (i < WL_NUM_PMKIDS_MAX) { memcpy(pmkids->pmkid[i].BSSID, pmksa->bssid, ETH_ALEN); memcpy(pmkids->pmkid[i].PMKID, pmksa->pmkid, WLAN_PMKID_LEN); if (i == pmkid_len) { pmkid_len++; pmkids->npmkid = cpu_to_le32(pmkid_len); } } else err = -EINVAL; WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n", pmkids->pmkid[pmkid_len].BSSID); for (i = 0; i < WLAN_PMKID_LEN; i++) WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]); err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); WL_TRACE("Exit\n"); return err; } static s32 
brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_pmksa *pmksa) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); struct pmkid_list pmkid; s32 err = 0; int i, pmkid_len; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN); memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN); WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n", &pmkid.pmkid[0].BSSID); for (i = 0; i < WLAN_PMKID_LEN; i++) WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]); pmkid_len = le32_to_cpu(cfg_priv->pmk_list->pmkids.npmkid); for (i = 0; i < pmkid_len; i++) if (!memcmp (pmksa->bssid, &cfg_priv->pmk_list->pmkids.pmkid[i].BSSID, ETH_ALEN)) break; if ((pmkid_len > 0) && (i < pmkid_len)) { memset(&cfg_priv->pmk_list->pmkids.pmkid[i], 0, sizeof(struct pmkid)); for (; i < (pmkid_len - 1); i++) { memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].BSSID, &cfg_priv->pmk_list->pmkids.pmkid[i + 1].BSSID, ETH_ALEN); memcpy(&cfg_priv->pmk_list->pmkids.pmkid[i].PMKID, &cfg_priv->pmk_list->pmkids.pmkid[i + 1].PMKID, WLAN_PMKID_LEN); } cfg_priv->pmk_list->pmkids.npmkid = cpu_to_le32(pmkid_len - 1); } else err = -EINVAL; err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); WL_TRACE("Exit\n"); return err; } static s32 brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) { struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy); s32 err = 0; WL_TRACE("Enter\n"); if (!check_sys_up(wiphy)) return -EIO; memset(cfg_priv->pmk_list, 0, sizeof(*cfg_priv->pmk_list)); err = brcmf_update_pmklist(ndev, cfg_priv->pmk_list, err); WL_TRACE("Exit\n"); return err; } static struct cfg80211_ops wl_cfg80211_ops = { .change_virtual_intf = brcmf_cfg80211_change_iface, .scan = brcmf_cfg80211_scan, .set_wiphy_params = brcmf_cfg80211_set_wiphy_params, .join_ibss = brcmf_cfg80211_join_ibss, .leave_ibss = brcmf_cfg80211_leave_ibss, .get_station = brcmf_cfg80211_get_station, .set_tx_power = brcmf_cfg80211_set_tx_power, .get_tx_power = brcmf_cfg80211_get_tx_power, .add_key = brcmf_cfg80211_add_key, .del_key = brcmf_cfg80211_del_key, .get_key = brcmf_cfg80211_get_key, .set_default_key = brcmf_cfg80211_config_default_key, .set_default_mgmt_key = brcmf_cfg80211_config_default_mgmt_key, .set_power_mgmt = brcmf_cfg80211_set_power_mgmt, .set_bitrate_mask = brcmf_cfg80211_set_bitrate_mask, .connect = brcmf_cfg80211_connect, .disconnect = brcmf_cfg80211_disconnect, .suspend = brcmf_cfg80211_suspend, .resume = brcmf_cfg80211_resume, .set_pmksa = brcmf_cfg80211_set_pmksa, .del_pmksa = brcmf_cfg80211_del_pmksa, .flush_pmksa = brcmf_cfg80211_flush_pmksa }; static s32 brcmf_mode_to_nl80211_iftype(s32 mode) { s32 err = 0; switch (mode) { case WL_MODE_BSS: return NL80211_IFTYPE_STATION; case WL_MODE_IBSS: return NL80211_IFTYPE_ADHOC; default: return NL80211_IFTYPE_UNSPECIFIED; } return err; } static struct wireless_dev *brcmf_alloc_wdev(s32 sizeof_iface, struct device *ndev) { struct wireless_dev *wdev; s32 err = 0; wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); if (!wdev) return ERR_PTR(-ENOMEM); wdev->wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_priv) + sizeof_iface); if (!wdev->wiphy) { WL_ERR("Could not allocate wiphy device\n"); err = -ENOMEM; goto wiphy_new_out; } set_wiphy_dev(wdev->wiphy, ndev); wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX; wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX; wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = 
&__wl_band_2ghz; wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set * it as 11a by default. * This will be updated with * 11n phy tables in * "ifconfig up" * if phy has 11n capability */ wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wdev->wiphy->cipher_suites = __wl_cipher_suites; wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power * save mode * by default */ err = wiphy_register(wdev->wiphy); if (err < 0) { WL_ERR("Could not register wiphy device (%d)\n", err); goto wiphy_register_out; } return wdev; wiphy_register_out: wiphy_free(wdev->wiphy); wiphy_new_out: kfree(wdev); return ERR_PTR(err); } static void brcmf_free_wdev(struct brcmf_cfg80211_priv *cfg_priv) { struct wireless_dev *wdev = cfg_priv->wdev; if (!wdev) { WL_ERR("wdev is invalid\n"); return; } wiphy_unregister(wdev->wiphy); wiphy_free(wdev->wiphy); kfree(wdev); cfg_priv->wdev = NULL; } static bool brcmf_is_linkup(struct brcmf_cfg80211_priv *cfg_priv, const struct brcmf_event_msg *e) { u32 event = be32_to_cpu(e->event_type); u32 status = be32_to_cpu(e->status); if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) { WL_CONN("Processing set ssid\n"); cfg_priv->link_up = true; return true; } return false; } static bool brcmf_is_linkdown(struct brcmf_cfg80211_priv *cfg_priv, const struct brcmf_event_msg *e) { u32 event = be32_to_cpu(e->event_type); u16 flags = be16_to_cpu(e->flags); if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) { WL_CONN("Processing link down\n"); return true; } return false; } static bool brcmf_is_nonetwork(struct brcmf_cfg80211_priv *cfg_priv, const struct brcmf_event_msg *e) { u32 event = be32_to_cpu(e->event_type); u32 status = be32_to_cpu(e->status); if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) { WL_CONN("Processing Link %s & no network found\n", be16_to_cpu(e->flags) & BRCMF_EVENT_MSG_LINK ? 
"up" : "down"); return true; } if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) { WL_CONN("Processing connecting & no network found\n"); return true; } return false; } static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); kfree(conn_info->req_ie); conn_info->req_ie = NULL; conn_info->req_ie_len = 0; kfree(conn_info->resp_ie); conn_info->resp_ie = NULL; conn_info->resp_ie_len = 0; } static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_priv *cfg_priv) { struct net_device *ndev = cfg_to_ndev(cfg_priv); struct brcmf_cfg80211_assoc_ielen_le *assoc_info; struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); u32 req_len; u32 resp_len; s32 err = 0; brcmf_clear_assoc_ies(cfg_priv); err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg_priv->extra_buf, WL_ASSOC_INFO_MAX); if (err) { WL_ERR("could not get assoc info (%d)\n", err); return err; } assoc_info = (struct brcmf_cfg80211_assoc_ielen_le *)cfg_priv->extra_buf; req_len = le32_to_cpu(assoc_info->req_len); resp_len = le32_to_cpu(assoc_info->resp_len); if (req_len) { err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies", cfg_priv->extra_buf, WL_ASSOC_INFO_MAX); if (err) { WL_ERR("could not get assoc req (%d)\n", err); return err; } conn_info->req_ie_len = req_len; conn_info->req_ie = kmemdup(cfg_priv->extra_buf, conn_info->req_ie_len, GFP_KERNEL); } else { conn_info->req_ie_len = 0; conn_info->req_ie = NULL; } if (resp_len) { err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies", cfg_priv->extra_buf, WL_ASSOC_INFO_MAX); if (err) { WL_ERR("could not get assoc resp (%d)\n", err); return err; } conn_info->resp_ie_len = resp_len; conn_info->resp_ie = kmemdup(cfg_priv->extra_buf, conn_info->resp_ie_len, GFP_KERNEL); } else { conn_info->resp_ie_len = 0; conn_info->resp_ie = NULL; } WL_CONN("req len (%d) resp len (%d)\n", conn_info->req_ie_len, conn_info->resp_ie_len); return err; } static s32 brcmf_bss_roaming_done(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e) { struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); struct wiphy *wiphy = cfg_to_wiphy(cfg_priv); struct brcmf_channel_info_le channel_le; struct ieee80211_channel *notify_channel; struct ieee80211_supported_band *band; u32 freq; s32 err = 0; u32 target_channel; WL_TRACE("Enter\n"); brcmf_get_assoc_ies(cfg_priv); brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID); brcmf_update_bss_info(cfg_priv); brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_le, sizeof(channel_le)); target_channel = le32_to_cpu(channel_le.target_channel); WL_CONN("Roamed to channel %d\n", target_channel); if (target_channel <= CH_MAX_2G_CHANNEL) band = wiphy->bands[IEEE80211_BAND_2GHZ]; else band = wiphy->bands[IEEE80211_BAND_5GHZ]; freq = ieee80211_channel_to_frequency(target_channel, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); cfg80211_roamed(ndev, notify_channel, (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID), conn_info->req_ie, conn_info->req_ie_len, conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); WL_CONN("Report roaming result\n"); set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); WL_TRACE("Exit\n"); return err; } static s32 brcmf_bss_connect_done(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, bool completed) { struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg_priv); s32 err = 0; WL_TRACE("Enter\n"); if 
(test_and_clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) { if (completed) { brcmf_get_assoc_ies(cfg_priv); brcmf_update_prof(cfg_priv, NULL, &e->addr, WL_PROF_BSSID); brcmf_update_bss_info(cfg_priv); } cfg80211_connect_result(ndev, (u8 *)brcmf_read_prof(cfg_priv, WL_PROF_BSSID), conn_info->req_ie, conn_info->req_ie_len, conn_info->resp_ie, conn_info->resp_ie_len, completed ? WLAN_STATUS_SUCCESS : WLAN_STATUS_AUTH_TIMEOUT, GFP_KERNEL); if (completed) set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); WL_CONN("Report connect result - connection %s\n", completed ? "succeeded" : "failed"); } WL_TRACE("Exit\n"); return err; } static s32 brcmf_notify_connect_status(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { s32 err = 0; if (brcmf_is_linkup(cfg_priv, e)) { WL_CONN("Linkup\n"); if (brcmf_is_ibssmode(cfg_priv)) { brcmf_update_prof(cfg_priv, NULL, (void *)e->addr, WL_PROF_BSSID); wl_inform_ibss(cfg_priv, ndev, e->addr); cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL); clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); set_bit(WL_STATUS_CONNECTED, &cfg_priv->status); } else brcmf_bss_connect_done(cfg_priv, ndev, e, true); } else if (brcmf_is_linkdown(cfg_priv, e)) { WL_CONN("Linkdown\n"); if (brcmf_is_ibssmode(cfg_priv)) { clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); if (test_and_clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) brcmf_link_down(cfg_priv); } else { brcmf_bss_connect_done(cfg_priv, ndev, e, false); if (test_and_clear_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) { cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL); brcmf_link_down(cfg_priv); } } brcmf_init_prof(cfg_priv->profile); } else if (brcmf_is_nonetwork(cfg_priv, e)) { if (brcmf_is_ibssmode(cfg_priv)) clear_bit(WL_STATUS_CONNECTING, &cfg_priv->status); else brcmf_bss_connect_done(cfg_priv, ndev, e, false); } return err; } static s32 brcmf_notify_roaming_status(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { s32 err = 0; u32 event = be32_to_cpu(e->event_type); u32 status = be32_to_cpu(e->status); if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) { if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) brcmf_bss_roaming_done(cfg_priv, ndev, e); else brcmf_bss_connect_done(cfg_priv, ndev, e, true); } return err; } static s32 brcmf_notify_mic_status(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { u16 flags = be16_to_cpu(e->flags); enum nl80211_key_type key_type; if (flags & BRCMF_EVENT_MSG_GROUP) key_type = NL80211_KEYTYPE_GROUP; else key_type = NL80211_KEYTYPE_PAIRWISE; cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1, NULL, GFP_KERNEL); return 0; } static s32 brcmf_notify_scan_status(struct brcmf_cfg80211_priv *cfg_priv, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { struct brcmf_channel_info_le channel_inform_le; struct brcmf_scan_results_le *bss_list_le; u32 len = WL_SCAN_BUF_MAX; s32 err = 0; bool scan_abort = false; u32 scan_channel; WL_TRACE("Enter\n"); if (cfg_priv->iscan_on && cfg_priv->iscan_kickstart) { WL_TRACE("Exit\n"); return brcmf_wakeup_iscan(cfg_to_iscan(cfg_priv)); } if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg_priv->status)) { WL_ERR("Scan complete while device not scanning\n"); scan_abort = true; err = -EINVAL; goto scan_done_out; } err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_inform_le, sizeof(channel_inform_le)); if (err) { WL_ERR("scan busy 
(%d)\n", err); scan_abort = true; goto scan_done_out; } scan_channel = le32_to_cpu(channel_inform_le.scan_channel); if (scan_channel) WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel); cfg_priv->bss_list = cfg_priv->scan_results; bss_list_le = (struct brcmf_scan_results_le *) cfg_priv->bss_list; memset(cfg_priv->scan_results, 0, len); bss_list_le->buflen = cpu_to_le32(len); err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS, cfg_priv->scan_results, len); if (err) { WL_ERR("%s Scan_results error (%d)\n", ndev->name, err); err = -EINVAL; scan_abort = true; goto scan_done_out; } cfg_priv->scan_results->buflen = le32_to_cpu(bss_list_le->buflen); cfg_priv->scan_results->version = le32_to_cpu(bss_list_le->version); cfg_priv->scan_results->count = le32_to_cpu(bss_list_le->count); err = brcmf_inform_bss(cfg_priv); if (err) { scan_abort = true; goto scan_done_out; } scan_done_out: if (cfg_priv->scan_request) { WL_SCAN("calling cfg80211_scan_done\n"); cfg80211_scan_done(cfg_priv->scan_request, scan_abort); brcmf_set_mpc(ndev, 1); cfg_priv->scan_request = NULL; } WL_TRACE("Exit\n"); return err; } static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf) { conf->mode = (u32)-1; conf->frag_threshold = (u32)-1; conf->rts_threshold = (u32)-1; conf->retry_short = (u32)-1; conf->retry_long = (u32)-1; conf->tx_power = -1; } static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el) { memset(el, 0, sizeof(*el)); el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status; el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status; el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status; el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status; el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status; } static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_priv *cfg_priv) { kfree(cfg_priv->scan_results); cfg_priv->scan_results = NULL; kfree(cfg_priv->bss_info); cfg_priv->bss_info = NULL; kfree(cfg_priv->conf); cfg_priv->conf = NULL; kfree(cfg_priv->profile); cfg_priv->profile = NULL; kfree(cfg_priv->scan_req_int); cfg_priv->scan_req_int = NULL; kfree(cfg_priv->dcmd_buf); cfg_priv->dcmd_buf = NULL; kfree(cfg_priv->extra_buf); cfg_priv->extra_buf = NULL; kfree(cfg_priv->iscan); cfg_priv->iscan = NULL; kfree(cfg_priv->pmk_list); cfg_priv->pmk_list = NULL; } static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_priv *cfg_priv) { cfg_priv->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL); if (!cfg_priv->scan_results) goto init_priv_mem_out; cfg_priv->conf = kzalloc(sizeof(*cfg_priv->conf), GFP_KERNEL); if (!cfg_priv->conf) goto init_priv_mem_out; cfg_priv->profile = kzalloc(sizeof(*cfg_priv->profile), GFP_KERNEL); if (!cfg_priv->profile) goto init_priv_mem_out; cfg_priv->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); if (!cfg_priv->bss_info) goto init_priv_mem_out; cfg_priv->scan_req_int = kzalloc(sizeof(*cfg_priv->scan_req_int), GFP_KERNEL); if (!cfg_priv->scan_req_int) goto init_priv_mem_out; cfg_priv->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL); if (!cfg_priv->dcmd_buf) goto init_priv_mem_out; cfg_priv->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); if (!cfg_priv->extra_buf) goto init_priv_mem_out; cfg_priv->iscan = kzalloc(sizeof(*cfg_priv->iscan), GFP_KERNEL); if (!cfg_priv->iscan) goto init_priv_mem_out; cfg_priv->pmk_list = kzalloc(sizeof(*cfg_priv->pmk_list), GFP_KERNEL); if (!cfg_priv->pmk_list) goto init_priv_mem_out; return 0; init_priv_mem_out: brcmf_deinit_priv_mem(cfg_priv); return -ENOMEM; } /* * retrieve first queued event from head */ static struct 
brcmf_cfg80211_event_q *brcmf_deq_event( struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_event_q *e = NULL; spin_lock_irq(&cfg_priv->evt_q_lock); if (!list_empty(&cfg_priv->evt_q_list)) { e = list_first_entry(&cfg_priv->evt_q_list, struct brcmf_cfg80211_event_q, evt_q_list); list_del(&e->evt_q_list); } spin_unlock_irq(&cfg_priv->evt_q_lock); return e; } /* * push event to tail of the queue * * remark: this function may not sleep as it is called in atomic context. */ static s32 brcmf_enq_event(struct brcmf_cfg80211_priv *cfg_priv, u32 event, const struct brcmf_event_msg *msg) { struct brcmf_cfg80211_event_q *e; s32 err = 0; ulong flags; e = kzalloc(sizeof(struct brcmf_cfg80211_event_q), GFP_ATOMIC); if (!e) return -ENOMEM; e->etype = event; memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg)); spin_lock_irqsave(&cfg_priv->evt_q_lock, flags); list_add_tail(&e->evt_q_list, &cfg_priv->evt_q_list); spin_unlock_irqrestore(&cfg_priv->evt_q_lock, flags); return err; } static void brcmf_put_event(struct brcmf_cfg80211_event_q *e) { kfree(e); } static void brcmf_cfg80211_event_handler(struct work_struct *work) { struct brcmf_cfg80211_priv *cfg_priv = container_of(work, struct brcmf_cfg80211_priv, event_work); struct brcmf_cfg80211_event_q *e; e = brcmf_deq_event(cfg_priv); if (unlikely(!e)) { WL_ERR("event queue empty...\n"); return; } do { WL_INFO("event type (%d)\n", e->etype); if (cfg_priv->el.handler[e->etype]) cfg_priv->el.handler[e->etype](cfg_priv, cfg_to_ndev(cfg_priv), &e->emsg, e->edata); else WL_INFO("Unknown Event (%d): ignoring\n", e->etype); brcmf_put_event(e); } while ((e = brcmf_deq_event(cfg_priv))); } static void brcmf_init_eq(struct brcmf_cfg80211_priv *cfg_priv) { spin_lock_init(&cfg_priv->evt_q_lock); INIT_LIST_HEAD(&cfg_priv->evt_q_list); } static void brcmf_flush_eq(struct brcmf_cfg80211_priv *cfg_priv) { struct brcmf_cfg80211_event_q *e; spin_lock_irq(&cfg_priv->evt_q_lock); while (!list_empty(&cfg_priv->evt_q_list)) { e = list_first_entry(&cfg_priv->evt_q_list, struct brcmf_cfg80211_event_q, evt_q_list); list_del(&e->evt_q_list); kfree(e); } spin_unlock_irq(&cfg_priv->evt_q_lock); } static s32 wl_init_priv(struct brcmf_cfg80211_priv *cfg_priv) { s32 err = 0; cfg_priv->scan_request = NULL; cfg_priv->pwr_save = true; cfg_priv->iscan_on = true; /* iscan on & off switch. we enable iscan per default */ cfg_priv->roam_on = true; /* roam on & off switch. 
we enable roam per default */ cfg_priv->iscan_kickstart = false; cfg_priv->active_scan = true; /* we do active scan for specific scan per default */ cfg_priv->dongle_up = false; /* dongle is not up yet */ brcmf_init_eq(cfg_priv); err = brcmf_init_priv_mem(cfg_priv); if (err) return err; INIT_WORK(&cfg_priv->event_work, brcmf_cfg80211_event_handler); brcmf_init_eloop_handler(&cfg_priv->el); mutex_init(&cfg_priv->usr_sync); err = brcmf_init_iscan(cfg_priv); if (err) return err; brcmf_init_conf(cfg_priv->conf); brcmf_init_prof(cfg_priv->profile); brcmf_link_down(cfg_priv); return err; } static void wl_deinit_priv(struct brcmf_cfg80211_priv *cfg_priv) { cancel_work_sync(&cfg_priv->event_work); cfg_priv->dongle_up = false; /* dongle down */ brcmf_flush_eq(cfg_priv); brcmf_link_down(cfg_priv); brcmf_term_iscan(cfg_priv); brcmf_deinit_priv_mem(cfg_priv); } struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev, struct device *busdev, void *data) { struct wireless_dev *wdev; struct brcmf_cfg80211_priv *cfg_priv; struct brcmf_cfg80211_iface *ci; struct brcmf_cfg80211_dev *cfg_dev; s32 err = 0; if (!ndev) { WL_ERR("ndev is invalid\n"); return NULL; } cfg_dev = kzalloc(sizeof(struct brcmf_cfg80211_dev), GFP_KERNEL); if (!cfg_dev) return NULL; wdev = brcmf_alloc_wdev(sizeof(struct brcmf_cfg80211_iface), busdev); if (IS_ERR(wdev)) { kfree(cfg_dev); return NULL; } wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS); cfg_priv = wdev_to_cfg(wdev); cfg_priv->wdev = wdev; cfg_priv->pub = data; ci = (struct brcmf_cfg80211_iface *)&cfg_priv->ci; ci->cfg_priv = cfg_priv; ndev->ieee80211_ptr = wdev; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; err = wl_init_priv(cfg_priv); if (err) { WL_ERR("Failed to init iwm_priv (%d)\n", err); goto cfg80211_attach_out; } brcmf_set_drvdata(cfg_dev, ci); return cfg_dev; cfg80211_attach_out: brcmf_free_wdev(cfg_priv); kfree(cfg_dev); return NULL; } void brcmf_cfg80211_detach(struct brcmf_cfg80211_dev *cfg_dev) { struct brcmf_cfg80211_priv *cfg_priv; cfg_priv = brcmf_priv_get(cfg_dev); wl_deinit_priv(cfg_priv); brcmf_free_wdev(cfg_priv); brcmf_set_drvdata(cfg_dev, NULL); kfree(cfg_dev); } void brcmf_cfg80211_event(struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { u32 event_type = be32_to_cpu(e->event_type); struct brcmf_cfg80211_priv *cfg_priv = ndev_to_cfg(ndev); if (!brcmf_enq_event(cfg_priv, event_type, e)) schedule_work(&cfg_priv->event_work); } static s32 brcmf_dongle_mode(struct net_device *ndev, s32 iftype) { s32 infra = 0; s32 err = 0; switch (iftype) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_WDS: WL_ERR("type (%d) : currently we do not support this mode\n", iftype); err = -EINVAL; return err; case NL80211_IFTYPE_ADHOC: infra = 0; break; case NL80211_IFTYPE_STATION: infra = 1; break; default: err = -EINVAL; WL_ERR("invalid type (%d)\n", iftype); return err; } err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra); if (err) { WL_ERR("WLC_SET_INFRA error (%d)\n", err); return err; } return 0; } static s32 brcmf_dongle_eventmsg(struct net_device *ndev) { /* Room for "event_msgs" + '\0' + bitvec */ s8 iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; s8 eventmask[BRCMF_EVENTING_MASK_LEN]; s32 err = 0; WL_TRACE("Enter\n"); /* Setup event_msgs */ brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, iovbuf, sizeof(iovbuf)); if (err) { WL_ERR("Get event_msgs error (%d)\n", err); goto dongle_eventmsg_out; } memcpy(eventmask, 
iovbuf, BRCMF_EVENTING_MASK_LEN); setbit(eventmask, BRCMF_E_SET_SSID); setbit(eventmask, BRCMF_E_ROAM); setbit(eventmask, BRCMF_E_PRUNE); setbit(eventmask, BRCMF_E_AUTH); setbit(eventmask, BRCMF_E_REASSOC); setbit(eventmask, BRCMF_E_REASSOC_IND); setbit(eventmask, BRCMF_E_DEAUTH_IND); setbit(eventmask, BRCMF_E_DISASSOC_IND); setbit(eventmask, BRCMF_E_DISASSOC); setbit(eventmask, BRCMF_E_JOIN); setbit(eventmask, BRCMF_E_ASSOC_IND); setbit(eventmask, BRCMF_E_PSK_SUP); setbit(eventmask, BRCMF_E_LINK); setbit(eventmask, BRCMF_E_NDIS_LINK); setbit(eventmask, BRCMF_E_MIC_ERROR); setbit(eventmask, BRCMF_E_PMKID_CACHE); setbit(eventmask, BRCMF_E_TXFAIL); setbit(eventmask, BRCMF_E_JOIN_START); setbit(eventmask, BRCMF_E_SCAN_COMPLETE); brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); if (err) { WL_ERR("Set event_msgs error (%d)\n", err); goto dongle_eventmsg_out; } dongle_eventmsg_out: WL_TRACE("Exit\n"); return err; } static s32 brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout) { s8 iovbuf[32]; s32 err = 0; __le32 roamtrigger[2]; __le32 roam_delta[2]; __le32 bcn_to_le; __le32 roamvar_le; /* * Setup timeout if Beacons are lost and roam is * off to report link down */ if (roamvar) { bcn_to_le = cpu_to_le32(bcn_timeout); brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_to_le, sizeof(bcn_to_le), iovbuf, sizeof(iovbuf)); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); if (err) { WL_ERR("bcn_timeout error (%d)\n", err); goto dongle_rom_out; } } /* * Enable/Disable built-in roaming to allow supplicant * to take care of roaming */ WL_INFO("Internal Roaming = %s\n", roamvar ? "Off" : "On"); roamvar_le = cpu_to_le32(roamvar); brcmf_c_mkiovar("roam_off", (char *)&roamvar_le, sizeof(roamvar_le), iovbuf, sizeof(iovbuf)); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); if (err) { WL_ERR("roam_off error (%d)\n", err); goto dongle_rom_out; } roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL); roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_TRIGGER, (void *)roamtrigger, sizeof(roamtrigger)); if (err) { WL_ERR("WLC_SET_ROAM_TRIGGER error (%d)\n", err); goto dongle_rom_out; } roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA); roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_DELTA, (void *)roam_delta, sizeof(roam_delta)); if (err) { WL_ERR("WLC_SET_ROAM_DELTA error (%d)\n", err); goto dongle_rom_out; } dongle_rom_out: return err; } static s32 brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time, s32 scan_unassoc_time, s32 scan_passive_time) { s32 err = 0; __le32 scan_assoc_tm_le = cpu_to_le32(scan_assoc_time); __le32 scan_unassoc_tm_le = cpu_to_le32(scan_unassoc_time); __le32 scan_passive_tm_le = cpu_to_le32(scan_passive_time); err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_CHANNEL_TIME, &scan_assoc_tm_le, sizeof(scan_assoc_tm_le)); if (err) { if (err == -EOPNOTSUPP) WL_INFO("Scan assoc time is not supported\n"); else WL_ERR("Scan assoc time error (%d)\n", err); goto dongle_scantime_out; } err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_UNASSOC_TIME, &scan_unassoc_tm_le, sizeof(scan_unassoc_tm_le)); if (err) { if (err == -EOPNOTSUPP) WL_INFO("Scan unassoc time is not supported\n"); else WL_ERR("Scan unassoc time error (%d)\n", err); goto dongle_scantime_out; } err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_PASSIVE_TIME, &scan_passive_tm_le, 
sizeof(scan_passive_tm_le)); if (err) { if (err == -EOPNOTSUPP) WL_INFO("Scan passive time is not supported\n"); else WL_ERR("Scan passive time error (%d)\n", err); goto dongle_scantime_out; } dongle_scantime_out: return err; } static s32 wl_update_wiphybands(struct brcmf_cfg80211_priv *cfg_priv) { struct wiphy *wiphy; s32 phy_list; s8 phy; s32 err = 0; err = brcmf_exec_dcmd(cfg_to_ndev(cfg_priv), BRCM_GET_PHYLIST, &phy_list, sizeof(phy_list)); if (err) { WL_ERR("error (%d)\n", err); return err; } phy = ((char *)&phy_list)[1]; WL_INFO("%c phy\n", phy); if (phy == 'n' || phy == 'a') { wiphy = cfg_to_wiphy(cfg_priv); wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n; } return err; } static s32 brcmf_dongle_probecap(struct brcmf_cfg80211_priv *cfg_priv) { return wl_update_wiphybands(cfg_priv); } static s32 brcmf_config_dongle(struct brcmf_cfg80211_priv *cfg_priv) { struct net_device *ndev; struct wireless_dev *wdev; s32 power_mode; s32 err = 0; if (cfg_priv->dongle_up) return err; ndev = cfg_to_ndev(cfg_priv); wdev = ndev->ieee80211_ptr; brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME, WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME); err = brcmf_dongle_eventmsg(ndev); if (err) goto default_conf_out; power_mode = cfg_priv->pwr_save ? PM_FAST : PM_OFF; err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode); if (err) goto default_conf_out; WL_INFO("power save set to %s\n", (power_mode ? "enabled" : "disabled")); err = brcmf_dongle_roam(ndev, (cfg_priv->roam_on ? 0 : 1), WL_BEACON_TIMEOUT); if (err) goto default_conf_out; err = brcmf_dongle_mode(ndev, wdev->iftype); if (err && err != -EINPROGRESS) goto default_conf_out; err = brcmf_dongle_probecap(cfg_priv); if (err) goto default_conf_out; /* -EINPROGRESS: Call commit handler */ default_conf_out: cfg_priv->dongle_up = true; return err; } static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_priv *cfg_priv) { char buf[10+IFNAMSIZ]; struct dentry *fd; s32 err = 0; sprintf(buf, "netdev:%s", cfg_to_ndev(cfg_priv)->name); cfg_priv->debugfsdir = debugfs_create_dir(buf, cfg_to_wiphy(cfg_priv)->debugfsdir); fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg_priv->debugfsdir, (u16 *)&cfg_priv->profile->beacon_interval); if (!fd) { err = -ENOMEM; goto err_out; } fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg_priv->debugfsdir, (u8 *)&cfg_priv->profile->dtim_period); if (!fd) { err = -ENOMEM; goto err_out; } err_out: return err; } static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_priv *cfg_priv) { debugfs_remove_recursive(cfg_priv->debugfsdir); cfg_priv->debugfsdir = NULL; } static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_priv *cfg_priv) { s32 err = 0; set_bit(WL_STATUS_READY, &cfg_priv->status); brcmf_debugfs_add_netdev_params(cfg_priv); err = brcmf_config_dongle(cfg_priv); if (err) return err; brcmf_invoke_iscan(cfg_priv); return err; } static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_priv *cfg_priv) { /* * While going down, if associated with AP disassociate * from AP to save power */ if ((test_bit(WL_STATUS_CONNECTED, &cfg_priv->status) || test_bit(WL_STATUS_CONNECTING, &cfg_priv->status)) && test_bit(WL_STATUS_READY, &cfg_priv->status)) { WL_INFO("Disassociating from AP"); brcmf_link_down(cfg_priv); /* Make sure WPA_Supplicant receives all the event generated due to DISASSOC call to the fw to keep the state fw and WPA_Supplicant state consistent */ brcmf_delay(500); } set_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); brcmf_term_iscan(cfg_priv); if (cfg_priv->scan_request) { 
cfg80211_scan_done(cfg_priv->scan_request, true); /* May need to perform this to cover rmmod */ /* wl_set_mpc(cfg_to_ndev(wl), 1); */ cfg_priv->scan_request = NULL; } clear_bit(WL_STATUS_READY, &cfg_priv->status); clear_bit(WL_STATUS_SCANNING, &cfg_priv->status); clear_bit(WL_STATUS_SCAN_ABORTING, &cfg_priv->status); brcmf_debugfs_remove_netdev(cfg_priv); return 0; } s32 brcmf_cfg80211_up(struct brcmf_cfg80211_dev *cfg_dev) { struct brcmf_cfg80211_priv *cfg_priv; s32 err = 0; cfg_priv = brcmf_priv_get(cfg_dev); mutex_lock(&cfg_priv->usr_sync); err = __brcmf_cfg80211_up(cfg_priv); mutex_unlock(&cfg_priv->usr_sync); return err; } s32 brcmf_cfg80211_down(struct brcmf_cfg80211_dev *cfg_dev) { struct brcmf_cfg80211_priv *cfg_priv; s32 err = 0; cfg_priv = brcmf_priv_get(cfg_dev); mutex_lock(&cfg_priv->usr_sync); err = __brcmf_cfg80211_down(cfg_priv); mutex_unlock(&cfg_priv->usr_sync); return err; } static __used s32 brcmf_add_ie(struct brcmf_cfg80211_priv *cfg_priv, u8 t, u8 l, u8 *v) { struct brcmf_cfg80211_ie *ie = &cfg_priv->ie; s32 err = 0; if (ie->offset + l + 2 > WL_TLV_INFO_MAX) { WL_ERR("ei crosses buffer boundary\n"); return -ENOSPC; } ie->buf[ie->offset] = t; ie->buf[ie->offset + 1] = l; memcpy(&ie->buf[ie->offset + 2], v, l); ie->offset += l + 2; return err; }
gpl-2.0
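The brcmf_dongle_eventmsg() and brcmf_dongle_roam() paths in the file above both build their firmware variables with brcmf_c_mkiovar() and push them through BRCMF_C_SET_VAR. As a rough, stand-alone illustration of the buffer layout such iovar helpers conventionally produce (NUL-terminated variable name followed immediately by the raw payload), here is a hypothetical user-space sketch; pack_iovar() is an assumed helper for illustration only, not the driver's brcmf_c_mkiovar() implementation.

#include <stdint.h>
#include <string.h>

/* Hypothetical sketch of a SET_VAR iovar buffer: "name\0" + payload.
 * Returns the number of bytes used, or 0 if the buffer is too small. */
static size_t pack_iovar(const char *name, const void *data, size_t datalen,
			 uint8_t *buf, size_t buflen)
{
	size_t namelen = strlen(name) + 1;	/* keep the terminating NUL */

	if (namelen + datalen > buflen)
		return 0;

	memcpy(buf, name, namelen);		/* e.g. "roam_off\0" */
	if (datalen)
		memcpy(buf + namelen, data, datalen);	/* e.g. a 32-bit LE value */

	return namelen + datalen;
}

Under that assumed layout, the "Internal Roaming = Off" case in brcmf_dongle_roam() amounts to packing a little-endian 32-bit 1 after the string "roam_off" and handing the result to the SET_VAR command.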
NooNameR/Sense4.0-kernel
drivers/gpu/drm/i915/i915_mem.c
4598
9493
/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*- */ /* * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * */ #include "drmP.h" #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" /* This memory manager is integrated into the global/local lru * mechanisms used by the clients. Specifically, it operates by * setting the 'in_use' fields of the global LRU to indicate whether * this region is privately allocated to a client. * * This does require the client to actually respect that field. * * Currently no effort is made to allocate 'private' memory in any * clever way - the LRU information isn't used to determine which * block to allocate, and the ring is drained prior to allocations -- * in other words allocation is expensive. */ static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv; struct drm_tex_region *list; unsigned shift, nr; unsigned start; unsigned end; unsigned i; int age; shift = dev_priv->tex_lru_log_granularity; nr = I915_NR_TEX_REGIONS; start = p->start >> shift; end = (p->start + p->size - 1) >> shift; age = ++sarea_priv->texAge; list = sarea_priv->texList; /* Mark the regions with the new flag and update their age. Move * them to head of list to preserve LRU semantics. */ for (i = start; i <= end; i++) { list[i].in_use = in_use; list[i].age = age; /* remove_from_list(i) */ list[(unsigned)list[i].next].prev = list[i].prev; list[(unsigned)list[i].prev].next = list[i].next; /* insert_at_head(list, i) */ list[i].prev = nr; list[i].next = list[nr].next; list[(unsigned)list[nr].next].prev = i; list[nr].next = i; } } /* Very simple allocator for agp memory, working on a static range * already mapped into each client's address space. 
*/ static struct mem_block *split_block(struct mem_block *p, int start, int size, struct drm_file *file_priv) { /* Maybe cut off the start of an existing block */ if (start > p->start) { struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL); if (!newblock) goto out; newblock->start = start; newblock->size = p->size - (start - p->start); newblock->file_priv = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; p->next = newblock; p->size -= newblock->size; p = newblock; } /* Maybe cut off the end of an existing block */ if (size < p->size) { struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL); if (!newblock) goto out; newblock->start = start + size; newblock->size = p->size - size; newblock->file_priv = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; p->next = newblock; p->size = size; } out: /* Our block is in the middle */ p->file_priv = file_priv; return p; } static struct mem_block *alloc_block(struct mem_block *heap, int size, int align2, struct drm_file *file_priv) { struct mem_block *p; int mask = (1 << align2) - 1; for (p = heap->next; p != heap; p = p->next) { int start = (p->start + mask) & ~mask; if (p->file_priv == NULL && start + size <= p->start + p->size) return split_block(p, start, size, file_priv); } return NULL; } static struct mem_block *find_block(struct mem_block *heap, int start) { struct mem_block *p; for (p = heap->next; p != heap; p = p->next) if (p->start == start) return p; return NULL; } static void free_block(struct mem_block *p) { p->file_priv = NULL; /* Assumes a single contiguous range. Needs a special file_priv in * 'heap' to stop it being subsumed. */ if (p->next->file_priv == NULL) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; p->next->prev = p; kfree(q); } if (p->prev->file_priv == NULL) { struct mem_block *q = p->prev; q->size += p->size; q->next = p->next; q->next->prev = q; kfree(p); } } /* Initialize. How to check for an uninitialized heap? */ static int init_heap(struct mem_block **heap, int start, int size) { struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); if (!blocks) return -ENOMEM; *heap = kmalloc(sizeof(**heap), GFP_KERNEL); if (!*heap) { kfree(blocks); return -ENOMEM; } blocks->start = start; blocks->size = size; blocks->file_priv = NULL; blocks->next = blocks->prev = *heap; memset(*heap, 0, sizeof(**heap)); (*heap)->file_priv = (struct drm_file *) - 1; (*heap)->next = (*heap)->prev = blocks; return 0; } /* Free all blocks associated with the releasing file. */ void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv, struct mem_block *heap) { struct mem_block *p; if (!heap || !heap->next) return; for (p = heap->next; p != heap; p = p->next) { if (p->file_priv == file_priv) { p->file_priv = NULL; mark_block(dev, p, 0); } } /* Assumes a single contiguous range. Needs a special file_priv in * 'heap' to stop it being subsumed. */ for (p = heap->next; p != heap; p = p->next) { while (p->file_priv == NULL && p->next->file_priv == NULL) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; p->next->prev = p; kfree(q); } } } /* Shutdown. 
*/ void i915_mem_takedown(struct mem_block **heap) { struct mem_block *p; if (!*heap) return; for (p = (*heap)->next; p != *heap;) { struct mem_block *q = p; p = p->next; kfree(q); } kfree(*heap); *heap = NULL; } static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region) { switch (region) { case I915_MEM_REGION_AGP: return &dev_priv->agp_heap; default: return NULL; } } /* IOCTL HANDLERS */ int i915_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_mem_alloc_t *alloc = data; struct mem_block *block, **heap; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } heap = get_heap(dev_priv, alloc->region); if (!heap || !*heap) return -EFAULT; /* Make things easier on ourselves: all allocations at least * 4k aligned. */ if (alloc->alignment < 12) alloc->alignment = 12; block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv); if (!block) return -ENOMEM; mark_block(dev, block, 1); if (DRM_COPY_TO_USER(alloc->region_offset, &block->start, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return -EFAULT; } return 0; } int i915_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_mem_free_t *memfree = data; struct mem_block *block, **heap; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } heap = get_heap(dev_priv, memfree->region); if (!heap || !*heap) return -EFAULT; block = find_block(*heap, memfree->region_offset); if (!block) return -EFAULT; if (block->file_priv != file_priv) return -EPERM; mark_block(dev, block, 0); free_block(block); return 0; } int i915_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_mem_init_heap_t *initheap = data; struct mem_block **heap; if (!dev_priv) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } heap = get_heap(dev_priv, initheap->region); if (!heap) return -EFAULT; if (*heap) { DRM_ERROR("heap already initialized?"); return -EFAULT; } return init_heap(heap, initheap->start, initheap->size); } int i915_mem_destroy_heap( struct drm_device *dev, void *data, struct drm_file *file_priv ) { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_mem_destroy_heap_t *destroyheap = data; struct mem_block **heap; if ( !dev_priv ) { DRM_ERROR( "called with no initialization\n" ); return -EINVAL; } heap = get_heap( dev_priv, destroyheap->region ); if (!heap) { DRM_ERROR("get_heap failed"); return -EFAULT; } if (!*heap) { DRM_ERROR("heap not initialized?"); return -EFAULT; } i915_mem_takedown( heap ); return 0; }
gpl-2.0
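alloc_block() in i915_mem.c above walks the free list and rounds each candidate start up to a 2^align2 boundary with mask arithmetic ((start + mask) & ~mask), and i915_mem_alloc() forces align2 to at least 12 so every allocation ends up 4 KiB aligned. The following stand-alone sketch only restates that rounding step outside the kernel; align_up() is an illustrative helper, not part of the driver.

#include <assert.h>

/* Round 'start' up to the next multiple of 2^align2, as alloc_block() does. */
static int align_up(int start, int align2)
{
	int mask = (1 << align2) - 1;

	return (start + mask) & ~mask;
}

int main(void)
{
	assert(align_up(0x1001, 12) == 0x2000);	/* bumped to the next 4 KiB */
	assert(align_up(0x2000, 12) == 0x2000);	/* already aligned, unchanged */
	return 0;
}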
qubex22/AK-OnePone
net/ipv6/netfilter/ip6table_mangle.c
7414
4130
/* * IPv6 packet mangling table, a port of the IPv4 mangle table to IPv6 * * Copyright (C) 2000-2001 by Harald Welte <laforge@gnumonks.org> * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/slab.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("ip6tables mangle table"); #define MANGLE_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \ (1 << NF_INET_LOCAL_IN) | \ (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT) | \ (1 << NF_INET_POST_ROUTING)) static const struct xt_table packet_mangler = { .name = "mangle", .valid_hooks = MANGLE_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV6, .priority = NF_IP6_PRI_MANGLE, }; static unsigned int ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out) { unsigned int ret; struct in6_addr saddr, daddr; u_int8_t hop_limit; u_int32_t flowlabel, mark; #if 0 /* root is playing with raw sockets. */ if (skb->len < sizeof(struct iphdr) || ip_hdrlen(skb) < sizeof(struct iphdr)) { if (net_ratelimit()) pr_warning("ip6t_hook: happy cracking.\n"); return NF_ACCEPT; } #endif /* save source/dest address, mark, hoplimit, flowlabel, priority, */ memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr)); mark = skb->mark; hop_limit = ipv6_hdr(skb)->hop_limit; /* flowlabel and prio (includes version, which shouldn't change either */ flowlabel = *((u_int32_t *)ipv6_hdr(skb)); ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out, dev_net(out)->ipv6.ip6table_mangle); if (ret != NF_DROP && ret != NF_STOLEN && (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || skb->mark != mark || ipv6_hdr(skb)->hop_limit != hop_limit || flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; return ret; } /* The work comes in here from netfilter.c. 
*/ static unsigned int ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, int (*okfn)(struct sk_buff *)) { if (hook == NF_INET_LOCAL_OUT) return ip6t_mangle_out(skb, out); if (hook == NF_INET_POST_ROUTING) return ip6t_do_table(skb, hook, in, out, dev_net(out)->ipv6.ip6table_mangle); /* INPUT/FORWARD */ return ip6t_do_table(skb, hook, in, out, dev_net(in)->ipv6.ip6table_mangle); } static struct nf_hook_ops *mangle_ops __read_mostly; static int __net_init ip6table_mangle_net_init(struct net *net) { struct ip6t_replace *repl; repl = ip6t_alloc_initial_table(&packet_mangler); if (repl == NULL) return -ENOMEM; net->ipv6.ip6table_mangle = ip6t_register_table(net, &packet_mangler, repl); kfree(repl); if (IS_ERR(net->ipv6.ip6table_mangle)) return PTR_ERR(net->ipv6.ip6table_mangle); return 0; } static void __net_exit ip6table_mangle_net_exit(struct net *net) { ip6t_unregister_table(net, net->ipv6.ip6table_mangle); } static struct pernet_operations ip6table_mangle_net_ops = { .init = ip6table_mangle_net_init, .exit = ip6table_mangle_net_exit, }; static int __init ip6table_mangle_init(void) { int ret; ret = register_pernet_subsys(&ip6table_mangle_net_ops); if (ret < 0) return ret; /* Register hooks */ mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook); if (IS_ERR(mangle_ops)) { ret = PTR_ERR(mangle_ops); goto cleanup_table; } return ret; cleanup_table: unregister_pernet_subsys(&ip6table_mangle_net_ops); return ret; } static void __exit ip6table_mangle_fini(void) { xt_hook_unlink(&packet_mangler, mangle_ops); unregister_pernet_subsys(&ip6table_mangle_net_ops); } module_init(ip6table_mangle_init); module_exit(ip6table_mangle_fini);
gpl-2.0
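ip6t_mangle_out() above saves the first 32-bit word of the IPv6 header before running the mangle table and compares it afterwards, because version, traffic class and flow label all live in that word and a change to any of them may require re-routing the packet. The sketch below just decodes that word under the standard RFC 2460 layout, assuming it has already been converted to host byte order; decode_v6_word() is illustrative and not kernel code.

#include <assert.h>
#include <stdint.h>

/* First IPv6 header word: 4-bit version, 8-bit traffic class, 20-bit flow
 * label (RFC 2460). 'w' must already be in host byte order. */
static void decode_v6_word(uint32_t w, unsigned *ver, unsigned *tclass,
			   uint32_t *flowlabel)
{
	*ver = (w >> 28) & 0xf;
	*tclass = (w >> 20) & 0xff;
	*flowlabel = w & 0xfffff;
}

int main(void)
{
	unsigned ver, tc;
	uint32_t fl;

	decode_v6_word(0x60012345, &ver, &tc, &fl);
	assert(ver == 6 && tc == 0 && fl == 0x12345);
	return 0;
}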
ryrzy/g2_4.2.2
fs/yaffs2/yaffs_verify.c
7926
13283
/* * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. * * Copyright (C) 2002-2010 Aleph One Ltd. * for Toby Churchill Ltd and Brightstar Engineering * * Created by Charles Manning <charles@aleph1.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include "yaffs_verify.h" #include "yaffs_trace.h" #include "yaffs_bitmap.h" #include "yaffs_getblockinfo.h" #include "yaffs_nand.h" int yaffs_skip_verification(struct yaffs_dev *dev) { dev = dev; return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL)); } static int yaffs_skip_full_verification(struct yaffs_dev *dev) { dev = dev; return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL)); } static int yaffs_skip_nand_verification(struct yaffs_dev *dev) { dev = dev; return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND)); } static const char *block_state_name[] = { "Unknown", "Needs scanning", "Scanning", "Empty", "Allocating", "Full", "Dirty", "Checkpoint", "Collecting", "Dead" }; void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n) { int actually_used; int in_use; if (yaffs_skip_verification(dev)) return; /* Report illegal runtime states */ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES) yaffs_trace(YAFFS_TRACE_VERIFY, "Block %d has undefined state %d", n, bi->block_state); switch (bi->block_state) { case YAFFS_BLOCK_STATE_UNKNOWN: case YAFFS_BLOCK_STATE_SCANNING: case YAFFS_BLOCK_STATE_NEEDS_SCANNING: yaffs_trace(YAFFS_TRACE_VERIFY, "Block %d has bad run-state %s", n, block_state_name[bi->block_state]); } /* Check pages in use and soft deletions are legal */ actually_used = bi->pages_in_use - bi->soft_del_pages; if (bi->pages_in_use < 0 || bi->pages_in_use > dev->param.chunks_per_block || bi->soft_del_pages < 0 || bi->soft_del_pages > dev->param.chunks_per_block || actually_used < 0 || actually_used > dev->param.chunks_per_block) yaffs_trace(YAFFS_TRACE_VERIFY, "Block %d has illegal values pages_in_used %d soft_del_pages %d", n, bi->pages_in_use, bi->soft_del_pages); /* Check chunk bitmap legal */ in_use = yaffs_count_chunk_bits(dev, n); if (in_use != bi->pages_in_use) yaffs_trace(YAFFS_TRACE_VERIFY, "Block %d has inconsistent values pages_in_use %d counted chunk bits %d", n, bi->pages_in_use, in_use); } void yaffs_verify_collected_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n) { yaffs_verify_blk(dev, bi, n); /* After collection the block should be in the erased state */ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING && bi->block_state != YAFFS_BLOCK_STATE_EMPTY) { yaffs_trace(YAFFS_TRACE_ERROR, "Block %d is in state %d after gc, should be erased", n, bi->block_state); } } void yaffs_verify_blocks(struct yaffs_dev *dev) { int i; int state_count[YAFFS_NUMBER_OF_BLOCK_STATES]; int illegal_states = 0; if (yaffs_skip_verification(dev)) return; memset(state_count, 0, sizeof(state_count)); for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) { struct yaffs_block_info *bi = yaffs_get_block_info(dev, i); yaffs_verify_blk(dev, bi, i); if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES) state_count[bi->block_state]++; else illegal_states++; } yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary"); yaffs_trace(YAFFS_TRACE_VERIFY, "%d blocks have illegal states", illegal_states); if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1) yaffs_trace(YAFFS_TRACE_VERIFY, "Too many allocating blocks"); for (i = 0; i < 
YAFFS_NUMBER_OF_BLOCK_STATES; i++) yaffs_trace(YAFFS_TRACE_VERIFY, "%s %d blocks", block_state_name[i], state_count[i]); if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT]) yaffs_trace(YAFFS_TRACE_VERIFY, "Checkpoint block count wrong dev %d count %d", dev->blocks_in_checkpt, state_count[YAFFS_BLOCK_STATE_CHECKPOINT]); if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY]) yaffs_trace(YAFFS_TRACE_VERIFY, "Erased block count wrong dev %d count %d", dev->n_erased_blocks, state_count[YAFFS_BLOCK_STATE_EMPTY]); if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1) yaffs_trace(YAFFS_TRACE_VERIFY, "Too many collecting blocks %d (max is 1)", state_count[YAFFS_BLOCK_STATE_COLLECTING]); } /* * Verify the object header. oh must be valid, but obj and tags may be NULL in which * case those tests will not be performed. */ void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh, struct yaffs_ext_tags *tags, int parent_check) { if (obj && yaffs_skip_verification(obj->my_dev)) return; if (!(tags && obj && oh)) { yaffs_trace(YAFFS_TRACE_VERIFY, "Verifying object header tags %p obj %p oh %p", tags, obj, oh); return; } if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN || oh->type > YAFFS_OBJECT_TYPE_MAX) yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d header type is illegal value 0x%x", tags->obj_id, oh->type); if (tags->obj_id != obj->obj_id) yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d header mismatch obj_id %d", tags->obj_id, obj->obj_id); /* * Check that the object's parent ids match if parent_check requested. * * Tests do not apply to the root object. */ if (parent_check && tags->obj_id > 1 && !obj->parent) yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d header mismatch parent_id %d obj->parent is NULL", tags->obj_id, oh->parent_obj_id); if (parent_check && obj->parent && oh->parent_obj_id != obj->parent->obj_id && (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED || obj->parent->obj_id != YAFFS_OBJECTID_DELETED)) yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d header mismatch parent_id %d parent_obj_id %d", tags->obj_id, oh->parent_obj_id, obj->parent->obj_id); if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */ yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d header name is NULL", obj->obj_id); if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Trashed name */ yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d header name is 0xFF", obj->obj_id); } void yaffs_verify_file(struct yaffs_obj *obj) { int required_depth; int actual_depth; u32 last_chunk; u32 x; u32 i; struct yaffs_dev *dev; struct yaffs_ext_tags tags; struct yaffs_tnode *tn; u32 obj_id; if (!obj) return; if (yaffs_skip_verification(obj->my_dev)) return; dev = obj->my_dev; obj_id = obj->obj_id; /* Check file size is consistent with tnode depth */ last_chunk = obj->variant.file_variant.file_size / dev->data_bytes_per_chunk + 1; x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS; required_depth = 0; while (x > 0) { x >>= YAFFS_TNODES_INTERNAL_BITS; required_depth++; } actual_depth = obj->variant.file_variant.top_level; /* Check that the chunks in the tnode tree are all correct. * We do this by scanning through the tnode tree and * checking the tags for every chunk match. 
*/ if (yaffs_skip_nand_verification(dev)) return; for (i = 1; i <= last_chunk; i++) { tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i); if (tn) { u32 the_chunk = yaffs_get_group_base(dev, tn, i); if (the_chunk > 0) { yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL, &tags); if (tags.obj_id != obj_id || tags.chunk_id != i) yaffs_trace(YAFFS_TRACE_VERIFY, "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)", obj_id, i, the_chunk, tags.obj_id, tags.chunk_id); } } } } void yaffs_verify_link(struct yaffs_obj *obj) { if (obj && yaffs_skip_verification(obj->my_dev)) return; /* Verify sane equivalent object */ } void yaffs_verify_symlink(struct yaffs_obj *obj) { if (obj && yaffs_skip_verification(obj->my_dev)) return; /* Verify symlink string */ } void yaffs_verify_special(struct yaffs_obj *obj) { if (obj && yaffs_skip_verification(obj->my_dev)) return; } void yaffs_verify_obj(struct yaffs_obj *obj) { struct yaffs_dev *dev; u32 chunk_min; u32 chunk_max; u32 chunk_id_ok; u32 chunk_in_range; u32 chunk_wrongly_deleted; u32 chunk_valid; if (!obj) return; if (obj->being_created) return; dev = obj->my_dev; if (yaffs_skip_verification(dev)) return; /* Check sane object header chunk */ chunk_min = dev->internal_start_block * dev->param.chunks_per_block; chunk_max = (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1; chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min && ((unsigned)(obj->hdr_chunk)) <= chunk_max); chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0); chunk_valid = chunk_in_range && yaffs_check_chunk_bit(dev, obj->hdr_chunk / dev->param.chunks_per_block, obj->hdr_chunk % dev->param.chunks_per_block); chunk_wrongly_deleted = chunk_in_range && !chunk_valid; if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted)) yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d has chunk_id %d %s %s", obj->obj_id, obj->hdr_chunk, chunk_id_ok ? "" : ",out of range", chunk_wrongly_deleted ? 
",marked as deleted" : ""); if (chunk_valid && !yaffs_skip_nand_verification(dev)) { struct yaffs_ext_tags tags; struct yaffs_obj_hdr *oh; u8 *buffer = yaffs_get_temp_buffer(dev, __LINE__); oh = (struct yaffs_obj_hdr *)buffer; yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags); yaffs_verify_oh(obj, oh, &tags, 1); yaffs_release_temp_buffer(dev, buffer, __LINE__); } /* Verify it has a parent */ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) { yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d has parent pointer %p which does not look like an object", obj->obj_id, obj->parent); } /* Verify parent is a directory */ if (obj->parent && obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d's parent is not a directory (type %d)", obj->obj_id, obj->parent->variant_type); } switch (obj->variant_type) { case YAFFS_OBJECT_TYPE_FILE: yaffs_verify_file(obj); break; case YAFFS_OBJECT_TYPE_SYMLINK: yaffs_verify_symlink(obj); break; case YAFFS_OBJECT_TYPE_DIRECTORY: yaffs_verify_dir(obj); break; case YAFFS_OBJECT_TYPE_HARDLINK: yaffs_verify_link(obj); break; case YAFFS_OBJECT_TYPE_SPECIAL: yaffs_verify_special(obj); break; case YAFFS_OBJECT_TYPE_UNKNOWN: default: yaffs_trace(YAFFS_TRACE_VERIFY, "Obj %d has illegaltype %d", obj->obj_id, obj->variant_type); break; } } void yaffs_verify_objects(struct yaffs_dev *dev) { struct yaffs_obj *obj; int i; struct list_head *lh; if (yaffs_skip_verification(dev)) return; /* Iterate through the objects in each hash entry */ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { list_for_each(lh, &dev->obj_bucket[i].list) { if (lh) { obj = list_entry(lh, struct yaffs_obj, hash_link); yaffs_verify_obj(obj); } } } } void yaffs_verify_obj_in_dir(struct yaffs_obj *obj) { struct list_head *lh; struct yaffs_obj *list_obj; int count = 0; if (!obj) { yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify"); YBUG(); return; } if (yaffs_skip_verification(obj->my_dev)) return; if (!obj->parent) { yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent" ); YBUG(); return; } if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory"); YBUG(); } /* Iterate through the objects in each hash entry */ list_for_each(lh, &obj->parent->variant.dir_variant.children) { if (lh) { list_obj = list_entry(lh, struct yaffs_obj, siblings); yaffs_verify_obj(list_obj); if (obj == list_obj) count++; } } if (count != 1) { yaffs_trace(YAFFS_TRACE_ALWAYS, "Object in directory %d times", count); YBUG(); } } void yaffs_verify_dir(struct yaffs_obj *directory) { struct list_head *lh; struct yaffs_obj *list_obj; if (!directory) { YBUG(); return; } if (yaffs_skip_full_verification(directory->my_dev)) return; if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { yaffs_trace(YAFFS_TRACE_ALWAYS, "Directory has wrong type: %d", directory->variant_type); YBUG(); } /* Iterate through the objects in each hash entry */ list_for_each(lh, &directory->variant.dir_variant.children) { if (lh) { list_obj = list_entry(lh, struct yaffs_obj, siblings); if (list_obj->parent != directory) { yaffs_trace(YAFFS_TRACE_ALWAYS, "Object in directory list has wrong parent %p", list_obj->parent); YBUG(); } yaffs_verify_obj_in_dir(list_obj); } } } static int yaffs_free_verification_failures; void yaffs_verify_free_chunks(struct yaffs_dev *dev) { int counted; int difference; if (yaffs_skip_verification(dev)) return; counted = yaffs_count_free_chunks(dev); difference = dev->n_free_chunks - counted; if 
(difference) { yaffs_trace(YAFFS_TRACE_ALWAYS, "Freechunks verification failure %d %d %d", dev->n_free_chunks, counted, difference); yaffs_free_verification_failures++; } } int yaffs_verify_file_sane(struct yaffs_obj *in) { in = in; return YAFFS_OK; }
gpl-2.0
TeamBliss-Devices/android_kernel_google_msm
drivers/net/irda/actisys-sir.c
13302
7692
/********************************************************************* * * Filename: actisys.c * Version: 1.1 * Description: Implementation for the ACTiSYS IR-220L and IR-220L+ * dongles * Status: Beta. * Authors: Dag Brattli <dagb@cs.uit.no> (initially) * Jean Tourrilhes <jt@hpl.hp.com> (new version) * Martin Diehl <mad@mdiehl.de> (new version for sir_dev) * Created at: Wed Oct 21 20:02:35 1998 * Modified at: Sun Oct 27 22:02:13 2002 * Modified by: Martin Diehl <mad@mdiehl.de> * * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved. * Copyright (c) 1999 Jean Tourrilhes * Copyright (c) 2002 Martin Diehl * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ /* * Changelog * * 0.8 -> 0.9999 - Jean * o New initialisation procedure : much safer and correct * o New procedure the change speed : much faster and simpler * o Other cleanups & comments * Thanks to Lichen Wang @ Actisys for his excellent help... * * 1.0 -> 1.1 - Martin Diehl * modified for new sir infrastructure */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <net/irda/irda.h> #include "sir-dev.h" /* * Define the timing of the pulses we send to the dongle (to reset it, and * to toggle speeds). Basically, the limit here is the propagation speed of * the signals through the serial port, the dongle being much faster. Any * serial port support 115 kb/s, so we are sure that pulses 8.5 us wide can * go through cleanly . If you are on the wild side, you can try to lower * this value (Actisys recommended me 2 us, and 0 us work for me on a P233!) 
*/ #define MIN_DELAY 10 /* 10 us to be on the conservative side */ static int actisys_open(struct sir_dev *); static int actisys_close(struct sir_dev *); static int actisys_change_speed(struct sir_dev *, unsigned); static int actisys_reset(struct sir_dev *); /* These are the baudrates supported, in the order available */ /* Note : the 220L doesn't support 38400, but we will fix that below */ static unsigned baud_rates[] = { 9600, 19200, 57600, 115200, 38400 }; #define MAX_SPEEDS ARRAY_SIZE(baud_rates) static struct dongle_driver act220l = { .owner = THIS_MODULE, .driver_name = "Actisys ACT-220L", .type = IRDA_ACTISYS_DONGLE, .open = actisys_open, .close = actisys_close, .reset = actisys_reset, .set_speed = actisys_change_speed, }; static struct dongle_driver act220l_plus = { .owner = THIS_MODULE, .driver_name = "Actisys ACT-220L+", .type = IRDA_ACTISYS_PLUS_DONGLE, .open = actisys_open, .close = actisys_close, .reset = actisys_reset, .set_speed = actisys_change_speed, }; static int __init actisys_sir_init(void) { int ret; /* First, register an Actisys 220L dongle */ ret = irda_register_dongle(&act220l); if (ret < 0) return ret; /* Now, register an Actisys 220L+ dongle */ ret = irda_register_dongle(&act220l_plus); if (ret < 0) { irda_unregister_dongle(&act220l); return ret; } return 0; } static void __exit actisys_sir_cleanup(void) { /* We have to remove both dongles */ irda_unregister_dongle(&act220l_plus); irda_unregister_dongle(&act220l); } static int actisys_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Set the speeds we can accept */ qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; /* Remove support for 38400 if this is not a 220L+ dongle */ if (dev->dongle_drv->type == IRDA_ACTISYS_DONGLE) qos->baud_rate.bits &= ~IR_38400; qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */ irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int actisys_close(struct sir_dev *dev) { /* Power off the dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } /* * Function actisys_change_speed (task) * * Change speed of the ACTiSYS IR-220L and IR-220L+ type IrDA dongles. * To cycle through the available baud rates, pulse RTS low for a few us. * * First, we reset the dongle to always start from a known state. * Then, we cycle through the speeds by pulsing RTS low and then up. * The dongle allow us to pulse quite fast, se we can set speed in one go, * which is must faster ( < 100 us) and less complex than what is found * in some other dongle drivers... * Note that even if the new speed is the same as the current speed, * we reassert the speed. This make sure that things are all right, * and it's fast anyway... * By the way, this function will work for both type of dongles, * because the additional speed is at the end of the sequence... */ static int actisys_change_speed(struct sir_dev *dev, unsigned speed) { int ret = 0; int i = 0; IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __func__, speed, dev->speed); /* dongle was already resetted from irda_request state machine, * we are in known state (dongle default) */ /* * Now, we can set the speed requested. 
Send RTS pulses until we * reach the target speed */ for (i = 0; i < MAX_SPEEDS; i++) { if (speed == baud_rates[i]) { dev->speed = speed; break; } /* Set RTS low for 10 us */ sirdev_set_dtr_rts(dev, TRUE, FALSE); udelay(MIN_DELAY); /* Set RTS high for 10 us */ sirdev_set_dtr_rts(dev, TRUE, TRUE); udelay(MIN_DELAY); } /* Check if life is sweet... */ if (i >= MAX_SPEEDS) { actisys_reset(dev); ret = -EINVAL; /* This should not happen */ } /* Basta lavoro, on se casse d'ici... */ return ret; } /* * Function actisys_reset (task) * * Reset the Actisys type dongle. Warning, this function must only be * called with a process context! * * We need to do two things in this function : * o first make sure that the dongle is in a state where it can operate * o second put the dongle in a know state * * The dongle is powered of the RTS and DTR lines. In the dongle, there * is a big capacitor to accommodate the current spikes. This capacitor * takes a least 50 ms to be charged. In theory, the Bios set those lines * up, so by the time we arrive here we should be set. It doesn't hurt * to be on the conservative side, so we will wait... * <Martin : move above comment to irda_config_fsm> * Then, we set the speed to 9600 b/s to get in a known state (see in * change_speed for details). It is needed because the IrDA stack * has tried to set the speed immediately after our first return, * so before we can be sure the dongle is up and running. */ static int actisys_reset(struct sir_dev *dev) { /* Reset the dongle : set DTR low for 10 us */ sirdev_set_dtr_rts(dev, FALSE, TRUE); udelay(MIN_DELAY); /* Go back to normal mode */ sirdev_set_dtr_rts(dev, TRUE, TRUE); dev->speed = 9600; /* That's the default */ return 0; } MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no> - Jean Tourrilhes <jt@hpl.hp.com>"); MODULE_DESCRIPTION("ACTiSYS IR-220L and IR-220L+ dongle driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-2"); /* IRDA_ACTISYS_DONGLE */ MODULE_ALIAS("irda-dongle-3"); /* IRDA_ACTISYS_PLUS_DONGLE */ module_init(actisys_sir_init); module_exit(actisys_sir_cleanup);
gpl-2.0
ambikadash/linux-fqt
arch/arm/mach-shmobile/setup-emev2.c
247
2019
/* * Emma Mobile EV2 processor support * * Copyright (C) 2012 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/clk-provider.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/of_platform.h> #include <mach/common.h> #include <mach/emev2.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> static struct map_desc emev2_io_desc[] __initdata = { #ifdef CONFIG_SMP /* 2M mapping for SCU + L2 controller */ { .virtual = 0xf0000000, .pfn = __phys_to_pfn(0x1e000000), .length = SZ_2M, .type = MT_DEVICE }, #endif }; void __init emev2_map_io(void) { iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc)); } void __init emev2_init_delay(void) { shmobile_setup_delay(533, 1, 3); /* Cortex-A9 @ 533MHz */ } static void __init emev2_add_standard_devices_dt(void) { #ifdef CONFIG_COMMON_CLK of_clk_init(NULL); #else emev2_clock_init(); #endif of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } static const char *emev2_boards_compat_dt[] __initdata = { "renesas,emev2", NULL, }; DT_MACHINE_START(EMEV2_DT, "Generic Emma Mobile EV2 (Flattened Device Tree)") .smp = smp_ops(emev2_smp_ops), .map_io = emev2_map_io, .init_early = emev2_init_delay, .init_machine = emev2_add_standard_devices_dt, .init_late = shmobile_init_late, .dt_compat = emev2_boards_compat_dt, MACHINE_END
gpl-2.0
SteveLinCH/linux
sound/pci/hda/patch_si3054.c
503
9195
/* * Universal Interface for Intel High Definition Audio Codec * * HD audio interface patch for Silicon Labs 3054/5 modem codec * * Copyright (c) 2005 Sasha Khapyorsky <sashak@alsa-project.org> * Takashi Iwai <tiwai@suse.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" /* si3054 verbs */ #define SI3054_VERB_READ_NODE 0x900 #define SI3054_VERB_WRITE_NODE 0x100 /* si3054 nodes (registers) */ #define SI3054_EXTENDED_MID 2 #define SI3054_LINE_RATE 3 #define SI3054_LINE_LEVEL 4 #define SI3054_GPIO_CFG 5 #define SI3054_GPIO_POLARITY 6 #define SI3054_GPIO_STICKY 7 #define SI3054_GPIO_WAKEUP 8 #define SI3054_GPIO_STATUS 9 #define SI3054_GPIO_CONTROL 10 #define SI3054_MISC_AFE 11 #define SI3054_CHIPID 12 #define SI3054_LINE_CFG1 13 #define SI3054_LINE_STATUS 14 #define SI3054_DC_TERMINATION 15 #define SI3054_LINE_CONFIG 16 #define SI3054_CALLPROG_ATT 17 #define SI3054_SQ_CONTROL 18 #define SI3054_MISC_CONTROL 19 #define SI3054_RING_CTRL1 20 #define SI3054_RING_CTRL2 21 /* extended MID */ #define SI3054_MEI_READY 0xf /* line level */ #define SI3054_ATAG_MASK 0x00f0 #define SI3054_DTAG_MASK 0xf000 /* GPIO bits */ #define SI3054_GPIO_OH 0x0001 #define SI3054_GPIO_CID 0x0002 /* chipid and revisions */ #define SI3054_CHIPID_CODEC_REV_MASK 0x000f #define SI3054_CHIPID_DAA_REV_MASK 0x00f0 #define SI3054_CHIPID_INTERNATIONAL 0x0100 #define SI3054_CHIPID_DAA_ID 0x0f00 #define SI3054_CHIPID_CODEC_ID (1<<12) /* si3054 codec registers (nodes) access macros */ #define GET_REG(codec,reg) (snd_hda_codec_read(codec,reg,0,SI3054_VERB_READ_NODE,0)) #define SET_REG(codec,reg,val) (snd_hda_codec_write(codec,reg,0,SI3054_VERB_WRITE_NODE,val)) #define SET_REG_CACHE(codec,reg,val) \ snd_hda_codec_write_cache(codec,reg,0,SI3054_VERB_WRITE_NODE,val) struct si3054_spec { unsigned international; }; /* * Modem mixer */ #define PRIVATE_VALUE(reg,mask) ((reg<<16)|(mask&0xffff)) #define PRIVATE_REG(val) ((val>>16)&0xffff) #define PRIVATE_MASK(val) (val&0xffff) #define si3054_switch_info snd_ctl_boolean_mono_info static int si3054_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); u16 reg = PRIVATE_REG(kcontrol->private_value); u16 mask = PRIVATE_MASK(kcontrol->private_value); uvalue->value.integer.value[0] = (GET_REG(codec, reg)) & mask ? 
1 : 0 ; return 0; } static int si3054_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); u16 reg = PRIVATE_REG(kcontrol->private_value); u16 mask = PRIVATE_MASK(kcontrol->private_value); if (uvalue->value.integer.value[0]) SET_REG_CACHE(codec, reg, (GET_REG(codec, reg)) | mask); else SET_REG_CACHE(codec, reg, (GET_REG(codec, reg)) & ~mask); return 0; } #define SI3054_KCONTROL(kname,reg,mask) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = kname, \ .subdevice = HDA_SUBDEV_NID_FLAG | reg, \ .info = si3054_switch_info, \ .get = si3054_switch_get, \ .put = si3054_switch_put, \ .private_value = PRIVATE_VALUE(reg,mask), \ } static const struct snd_kcontrol_new si3054_modem_mixer[] = { SI3054_KCONTROL("Off-hook Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_OH), SI3054_KCONTROL("Caller ID Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_CID), {} }; static int si3054_build_controls(struct hda_codec *codec) { return snd_hda_add_new_ctls(codec, si3054_modem_mixer); } /* * PCM callbacks */ static int si3054_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { u16 val; SET_REG(codec, SI3054_LINE_RATE, substream->runtime->rate); val = GET_REG(codec, SI3054_LINE_LEVEL); val &= 0xff << (8 * (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)); val |= ((stream_tag & 0xf) << 4) << (8 * (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)); SET_REG(codec, SI3054_LINE_LEVEL, val); snd_hda_codec_setup_stream(codec, hinfo->nid, stream_tag, 0, format); return 0; } static int si3054_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { static unsigned int rates[] = { 8000, 9600, 16000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; substream->runtime->hw.period_bytes_min = 80; return snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); } static const struct hda_pcm_stream si3054_pcm = { .substreams = 1, .channels_min = 1, .channels_max = 1, .nid = 0x1, .rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_KNOT, .formats = SNDRV_PCM_FMTBIT_S16_LE, .maxbps = 16, .ops = { .open = si3054_pcm_open, .prepare = si3054_pcm_prepare, }, }; static int si3054_build_pcms(struct hda_codec *codec) { struct hda_pcm *info; info = snd_hda_codec_pcm_new(codec, "Si3054 Modem"); if (!info) return -ENOMEM; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = si3054_pcm; info->stream[SNDRV_PCM_STREAM_CAPTURE] = si3054_pcm; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = codec->core.mfg; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = codec->core.mfg; info->pcm_type = HDA_PCM_TYPE_MODEM; return 0; } /* * Init part */ static int si3054_init(struct hda_codec *codec) { struct si3054_spec *spec = codec->spec; unsigned wait_count; u16 val; if (snd_hdac_regmap_add_vendor_verb(&codec->core, SI3054_VERB_WRITE_NODE)) return -ENOMEM; snd_hda_codec_write(codec, AC_NODE_ROOT, 0, AC_VERB_SET_CODEC_RESET, 0); snd_hda_codec_write(codec, codec->core.mfg, 0, AC_VERB_SET_STREAM_FORMAT, 0); SET_REG(codec, SI3054_LINE_RATE, 9600); SET_REG(codec, SI3054_LINE_LEVEL, SI3054_DTAG_MASK|SI3054_ATAG_MASK); SET_REG(codec, SI3054_EXTENDED_MID, 0); wait_count = 10; do { msleep(2); val = GET_REG(codec, SI3054_EXTENDED_MID); } while ((val & SI3054_MEI_READY) != SI3054_MEI_READY && wait_count--); if((val&SI3054_MEI_READY) != 
SI3054_MEI_READY) { codec_err(codec, "si3054: cannot initialize. EXT MID = %04x\n", val); /* let's pray that this is no fatal error */ /* return -EACCES; */ } SET_REG(codec, SI3054_GPIO_POLARITY, 0xffff); SET_REG(codec, SI3054_GPIO_CFG, 0x0); SET_REG(codec, SI3054_MISC_AFE, 0); SET_REG(codec, SI3054_LINE_CFG1,0x200); if((GET_REG(codec,SI3054_LINE_STATUS) & (1<<6)) == 0) { codec_dbg(codec, "Link Frame Detect(FDT) is not ready (line status: %04x)\n", GET_REG(codec,SI3054_LINE_STATUS)); } spec->international = GET_REG(codec, SI3054_CHIPID) & SI3054_CHIPID_INTERNATIONAL; return 0; } static void si3054_free(struct hda_codec *codec) { kfree(codec->spec); } /* */ static const struct hda_codec_ops si3054_patch_ops = { .build_controls = si3054_build_controls, .build_pcms = si3054_build_pcms, .init = si3054_init, .free = si3054_free, }; static int patch_si3054(struct hda_codec *codec) { struct si3054_spec *spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->spec = spec; codec->patch_ops = si3054_patch_ops; return 0; } /* * patch entries */ static const struct hda_device_id snd_hda_id_si3054[] = { HDA_CODEC_ENTRY(0x163c3055, "Si3054", patch_si3054), HDA_CODEC_ENTRY(0x163c3155, "Si3054", patch_si3054), HDA_CODEC_ENTRY(0x11c13026, "Si3054", patch_si3054), HDA_CODEC_ENTRY(0x11c13055, "Si3054", patch_si3054), HDA_CODEC_ENTRY(0x11c13155, "Si3054", patch_si3054), HDA_CODEC_ENTRY(0x10573055, "Si3054", patch_si3054), HDA_CODEC_ENTRY(0x10573057, "Si3054", patch_si3054), HDA_CODEC_ENTRY(0x10573155, "Si3054", patch_si3054), /* VIA HDA on Clevo m540 */ HDA_CODEC_ENTRY(0x11063288, "Si3054", patch_si3054), /* Asus A8J Modem (SM56) */ HDA_CODEC_ENTRY(0x15433155, "Si3054", patch_si3054), /* LG LW20 modem */ HDA_CODEC_ENTRY(0x18540018, "Si3054", patch_si3054), {} }; MODULE_DEVICE_TABLE(hdaudio, snd_hda_id_si3054); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Si3054 HD-audio modem codec"); static struct hda_codec_driver si3054_driver = { .id = snd_hda_id_si3054, }; module_hda_codec_driver(si3054_driver);
gpl-2.0
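The si3054 mixer controls above squeeze a register number and a bit mask into the single private_value of each kcontrol through PRIVATE_VALUE(reg,mask), then recover them in the get/put callbacks with PRIVATE_REG() and PRIVATE_MASK(). The stand-alone sketch below restates that packing outside the ALSA framework; the three macros mirror the ones in the file, while main() is purely illustrative.

#include <assert.h>
#include <stdint.h>

#define PRIVATE_VALUE(reg, mask)	(((reg) << 16) | ((mask) & 0xffff))
#define PRIVATE_REG(val)		(((val) >> 16) & 0xffff)
#define PRIVATE_MASK(val)		((val) & 0xffff)

int main(void)
{
	/* "Off-hook Switch": register 10 (SI3054_GPIO_CONTROL), bit 0x0001. */
	uint32_t v = PRIVATE_VALUE(10, 0x0001);

	assert(PRIVATE_REG(v) == 10);
	assert(PRIVATE_MASK(v) == 0x0001);
	return 0;
}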
fortuna-dev/android_kernel_samsung_fortuna-common
drivers/media/platform/msm/vpu/vpu_vb2_queue.c
759
10375
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "VPU, %s: " fmt, __func__ #include <linux/msm_ion.h> #include "vpu_ioctl_internal.h" #include "vpu_configuration.h" #include "vpu_translate.h" #include "vpu_channel.h" #include "vpu_v4l2.h" /* * Videobuf2 callbacks */ #define MIN_NUM_VPU_BUFFERS 2 #define MAX_NUM_VPU_BUFFERS 32 static void vpu_vb2_ops_wait_prepare(struct vb2_queue *q) { struct vpu_dev_session *session = (struct vpu_dev_session *) vb2_get_drv_priv(q); int port = get_queue_port_number(q); if (port >= 0) mutex_unlock(&session->que_lock[port]); } static void vpu_vb2_ops_wait_finish(struct vb2_queue *q) { struct vpu_dev_session *session = (struct vpu_dev_session *) vb2_get_drv_priv(q); int port = get_queue_port_number(q); if (port >= 0) mutex_lock(&session->que_lock[port]); } /* * Return number of buffers and planes per buffer given current format * * called by vb2_reqbufs (confirms # of buffers & planes) */ static int vpu_vb2_ops_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { struct vpu_dev_session *session = (struct vpu_dev_session *) vb2_get_drv_priv(q); int i = 0; int port = 0; int min_buffers = MIN_NUM_VPU_BUFFERS; port = get_queue_port_number(q); if (port < 0) return -EINVAL; *num_planes = min_t(typeof(session->port_info[port].format.num_planes), session->port_info[port].format.num_planes, VIDEO_MAX_PLANES); for (i = 0; i < *num_planes; i++) { sizes[i] = 0; /* not needed */ alloc_ctxs[i] = (void *) session; } if (port == INPUT_PORT && session->port_info[port].scan_mode == LINESCANINTERLACED) min_buffers = 4; /* 4 buffers minimum for interlaced input */ if (*num_buffers < min_buffers) *num_buffers = min_buffers; else if (*num_buffers > MAX_NUM_VPU_BUFFERS) *num_buffers = MAX_NUM_VPU_BUFFERS; return 0; } static void vpu_vb2_ops_buf_cleanup(struct vb2_buffer *vb); /* prototype */ /* * Used to map buffers/planes if memory was not mapped. * Unmapped memory is checked before calling vb2_qbuf. * * When vb2_qbuf or vb2_prepare_buf ioctl on a new buffer is called, get_userptr * is called on each plane, then buf_init is called. * So it's job is IOMAPPING user buffers. Buffers are in DEQUED state when this * is called */ static int vpu_vb2_ops_buf_init(struct vb2_buffer *vb) { struct vpu_dev_session *session = (struct vpu_dev_session *) vb2_get_drv_priv(vb->vb2_queue); struct vpu_buffer *vpu_buf = to_vpu_buffer(vb); bool secure; int i, port, ret = 0; port = get_queue_port_number(vb->vb2_queue); if (port < 0) return -EINVAL; secure = session->port_info[port].secure_content ? 
true : false; for (i = 0; i < vb->num_planes; i++) { if (!vpu_buf->planes[i].new_plane) continue; vpu_buf->planes[i].new_plane = 0; if (!vpu_buf->planes[i].mem_cookie) { vpu_buf->planes[i].mem_cookie = vpu_mem_create_handle( session->core->resources.mem_client); if (!vpu_buf->planes[i].mem_cookie) { ret = -ENOMEM; goto err_buf_init; } } ret = vpu_mem_map_fd(vpu_buf->planes[i].mem_cookie, vpu_buf->planes[i].fd, vpu_buf->planes[i].length, vpu_buf->planes[i].data_offset, secure); if (ret) { vpu_buf->valid_addresses_mask &= ~ADDR_VALID_VPU; goto err_buf_init; } else { vpu_buf->planes[i].mapped_address[ADDR_INDEX_VPU] = vpu_mem_addr(vpu_buf->planes[i].mem_cookie, MEM_VPU_ID); vpu_buf->valid_addresses_mask |= ADDR_VALID_VPU; } } return 0; err_buf_init: vpu_vb2_ops_buf_cleanup(vb); return ret; } /* * Only called if session is streaming. * Commits any pending configuration changes, and sends buffer to IPC channel. * * called to pass ownership of buffer to driver (its status becomes ACTIVE just * before calling this). * It's called in beginning of vb2_streamon to pass all QUEUED buffers, or at * end vb2_qbuf if device is already streaming * Function cannot fail. So prepare/init should make any required checks if * needed */ static void vpu_vb2_ops_buf_queue(struct vb2_buffer *vb) { struct vpu_buffer *vpu_buf; struct vb2_queue *q = vb->vb2_queue; struct vpu_dev_session *session = (struct vpu_dev_session *) vb2_get_drv_priv(q); int ret = 0, port; vpu_buf = to_vpu_buffer(vb); port = get_queue_port_number(q); if (port < 0) { pr_err("Invalid buffer queue\n"); return; } if (session->streaming_state != ALL_STREAMING || !list_empty(&session->pending_list[port])) { /* * pending list tracks buffers which were queued by client but * not yet passed to fw, waiting for session to stream on * end-to-end. Access to this list is protected by port mutex */ INIT_LIST_HEAD(&vpu_buf->buffers_entry); list_add_tail(&vpu_buf->buffers_entry, &session->pending_list[port]); } else { if (port == INPUT_PORT) ret = vpu_hw_session_empty_buffer(session->id, translate_port_id(port), vpu_buf); else ret = vpu_hw_session_fill_buffer(session->id, translate_port_id(port), vpu_buf); if (ret) { pr_err("buf_queue fail, returning buffer\n"); vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); } } } /* * Frees all mapped memory for buffer * * Called when buffer memory is about to be cleaned up (but before put_userptr). * One way to initiate this is by calling vb2_reqbufs with count == 0 (or * vb2_queue_release) */ static void vpu_vb2_ops_buf_cleanup(struct vb2_buffer *vb) { struct vpu_buffer *vpu_buf; int i; pr_debug("unmap buffer #%d from iommu\n", vb->v4l2_buf.index); vpu_buf = to_vpu_buffer(vb); for (i = 0; i < vb->num_planes; i++) { if (vpu_buf->planes[i].mem_cookie) vpu_mem_destroy_handle(vpu_buf->planes[i].mem_cookie); memset(&vpu_buf->planes[i], 0, sizeof(struct vpu_plane)); } } /* * In vb2_streamon, after all QUEUED buffers are made ACTIVE, this is called and * passed number of queued (ACTIVE) buffers. * After function returns with success, vb2_queue status becomes streaming */ static int vpu_vb2_ops_start_streaming(struct vb2_queue *q, unsigned int count) { return 0; /* does nothing but required by vb2 framework */ } /* * Flushes all buffers passed to a port * * called to inform that streaming will be stopped. Immediately after this queue * streaming status is cleared. 
* All QUEUED or DONE/ERRORed buffers are moved to DEQUED * Can be called from vb2_queue_release or vb2_streamoff * Use to flush all buffers passed to a port */ static int vpu_vb2_ops_stop_streaming(struct vb2_queue *q) { struct vpu_dev_session *session = (struct vpu_dev_session *) vb2_get_drv_priv(q); int port = get_queue_port_number(q); pr_debug("called for port %d\n", port); if (port < 0) return -EINVAL; /* Flush/Retrieve all queued buffers */ vpu_vb2_flush_buffers(session, port); return 0; } static struct vb2_ops vpu_vb2_ops = { .queue_setup = vpu_vb2_ops_queue_setup, .wait_prepare = vpu_vb2_ops_wait_prepare, .wait_finish = vpu_vb2_ops_wait_finish, .buf_init = vpu_vb2_ops_buf_init, .buf_prepare = NULL, .buf_queue = vpu_vb2_ops_buf_queue, .buf_finish = NULL, .buf_cleanup = vpu_vb2_ops_buf_cleanup, .start_streaming = vpu_vb2_ops_start_streaming, .stop_streaming = vpu_vb2_ops_stop_streaming, }; /* * Videobuf2 memops (not used but required by vb2 framework) */ static void *vpu_vb2_mem_ops_get_userptr(void *alloc_ctx, unsigned long vaddr, unsigned long size, int write) { return (void *) 0xD15EA5E; /* return any non-null value */ } static void vpu_vb2_mem_ops_put_userptr(void *buf_priv) {} static struct vb2_mem_ops vpu_vb2_mem_ops = { .get_userptr = vpu_vb2_mem_ops_get_userptr, .put_userptr = vpu_vb2_mem_ops_put_userptr, }; /* * Header defined vb2 helper functions */ int vpu_vb2_queue_init(struct vb2_queue *q, enum v4l2_buf_type type, void *pdata) { int ret = 0; memset(q, 0, sizeof(*q)); q->type = type; q->io_modes = VB2_USERPTR; q->drv_priv = pdata; q->buf_struct_size = sizeof(struct vpu_buffer); q->ops = &vpu_vb2_ops; q->mem_ops = &vpu_vb2_mem_ops; q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; ret = vb2_queue_init(q); if (ret < 0) pr_err("init vb2 queue (type = %d) fails\n", type); return ret; } int vpu_vb2_flush_buffers(struct vpu_dev_session *session, int port) { int i, ret = 0; struct vb2_queue *q = &session->vbqueue[port]; struct vpu_buffer *buff, *n; enum flush_buf_type flush_port = (port == INPUT_PORT) ? CH_FLUSH_IN_BUF : CH_FLUSH_OUT_BUF; /* retrieve any buffers on pending list (not sent to fw yet) */ list_for_each_entry_safe(buff, n, &session->pending_list[port], buffers_entry) { vb2_buffer_done(&buff->vb, VB2_BUF_STATE_ERROR); list_del(&buff->buffers_entry); } /* return if no buffers remain with FW */ if (!atomic_read(&q->queued_count)) return 0; /* pause, if session is streaming */ if (session->streaming_state == ALL_STREAMING) { ret = vpu_hw_session_pause(session->id); if (ret) { pr_err("Session Pause failed\n"); return ret; } } /* Flush buffers from FW */ pr_debug("Flushing port %d buffers from FW\n", port); ret = vpu_hw_session_flush(session->id, translate_port_id(port), flush_port); if (ret) pr_err("port %d flush failed\n", port); /* resume, if session is streaming */ if (session->streaming_state == ALL_STREAMING) if (vpu_hw_session_resume(session->id)) pr_err("Session Resume failed\n"); if (!atomic_read(&q->queued_count)) return 0; /* Forced retrieve of buffers not returned by FW. Should never happen */ pr_warn("Forced retrieve of %d buffers from port %d\n", atomic_read(&q->queued_count), port); for (i = 0; i < q->num_buffers; i++) { if (q->bufs[i] && q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR); } return ret; }
gpl-2.0
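A minimal standalone sketch of the buffer-count policy applied in vpu_vb2_ops_queue_setup() above: interlaced input needs at least four buffers, every other port at least MIN_NUM_VPU_BUFFERS, and no port may request more than MAX_NUM_VPU_BUFFERS. The helper name clamp_num_buffers() is hypothetical and exists only for this illustration; it is not part of the driver.

#include <stdio.h>
#include <stdbool.h>

#define MIN_NUM_VPU_BUFFERS 2
#define MAX_NUM_VPU_BUFFERS 32

/* Hypothetical userspace restatement of the clamping done by
 * vpu_vb2_ops_queue_setup(): interlaced input needs >= 4 buffers,
 * other ports >= 2, and nothing may exceed 32.
 */
static unsigned int clamp_num_buffers(unsigned int requested, bool interlaced_input)
{
	unsigned int min_buffers = interlaced_input ? 4 : MIN_NUM_VPU_BUFFERS;

	if (requested < min_buffers)
		return min_buffers;
	if (requested > MAX_NUM_VPU_BUFFERS)
		return MAX_NUM_VPU_BUFFERS;
	return requested;
}

int main(void)
{
	printf("%u\n", clamp_num_buffers(1, false));  /* 2  */
	printf("%u\n", clamp_num_buffers(1, true));   /* 4  */
	printf("%u\n", clamp_num_buffers(64, false)); /* 32 */
	return 0;
}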
andrewoko-odion/linux
sound/soc/codecs/ts3a227e.c
759
10456
/* * TS3A227E Autonomous Audio Accessory Detection and Configuration Switch * * Copyright (C) 2014 Google, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/input.h> #include <linux/module.h> #include <linux/of_gpio.h> #include <linux/regmap.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/soc.h> #include "ts3a227e.h" struct ts3a227e { struct device *dev; struct regmap *regmap; struct snd_soc_jack *jack; bool plugged; bool mic_present; unsigned int buttons_held; int irq; }; /* Button values to be reported on the jack */ static const int ts3a227e_buttons[] = { SND_JACK_BTN_0, SND_JACK_BTN_1, SND_JACK_BTN_2, SND_JACK_BTN_3, }; #define TS3A227E_NUM_BUTTONS 4 #define TS3A227E_JACK_MASK (SND_JACK_HEADPHONE | \ SND_JACK_MICROPHONE | \ SND_JACK_BTN_0 | \ SND_JACK_BTN_1 | \ SND_JACK_BTN_2 | \ SND_JACK_BTN_3) /* TS3A227E registers */ #define TS3A227E_REG_DEVICE_ID 0x00 #define TS3A227E_REG_INTERRUPT 0x01 #define TS3A227E_REG_KP_INTERRUPT 0x02 #define TS3A227E_REG_INTERRUPT_DISABLE 0x03 #define TS3A227E_REG_SETTING_1 0x04 #define TS3A227E_REG_SETTING_2 0x05 #define TS3A227E_REG_SETTING_3 0x06 #define TS3A227E_REG_SWITCH_CONTROL_1 0x07 #define TS3A227E_REG_SWITCH_CONTROL_2 0x08 #define TS3A227E_REG_SWITCH_STATUS_1 0x09 #define TS3A227E_REG_SWITCH_STATUS_2 0x0a #define TS3A227E_REG_ACCESSORY_STATUS 0x0b #define TS3A227E_REG_ADC_OUTPUT 0x0c #define TS3A227E_REG_KP_THRESHOLD_1 0x0d #define TS3A227E_REG_KP_THRESHOLD_2 0x0e #define TS3A227E_REG_KP_THRESHOLD_3 0x0f /* TS3A227E_REG_INTERRUPT 0x01 */ #define INS_REM_EVENT 0x01 #define DETECTION_COMPLETE_EVENT 0x02 /* TS3A227E_REG_KP_INTERRUPT 0x02 */ #define PRESS_MASK(idx) (0x01 << (2 * (idx))) #define RELEASE_MASK(idx) (0x02 << (2 * (idx))) /* TS3A227E_REG_INTERRUPT_DISABLE 0x03 */ #define INS_REM_INT_DISABLE 0x01 #define DETECTION_COMPLETE_INT_DISABLE 0x02 #define ADC_COMPLETE_INT_DISABLE 0x04 #define INTB_DISABLE 0x08 /* TS3A227E_REG_SETTING_2 0x05 */ #define KP_ENABLE 0x04 /* TS3A227E_REG_SETTING_3 0x06 */ #define MICBIAS_SETTING_SFT (3) #define MICBIAS_SETTING_MASK (0x7 << MICBIAS_SETTING_SFT) /* TS3A227E_REG_ACCESSORY_STATUS 0x0b */ #define TYPE_3_POLE 0x01 #define TYPE_4_POLE_OMTP 0x02 #define TYPE_4_POLE_STANDARD 0x04 #define JACK_INSERTED 0x08 #define EITHER_MIC_MASK (TYPE_4_POLE_OMTP | TYPE_4_POLE_STANDARD) static const struct reg_default ts3a227e_reg_defaults[] = { { TS3A227E_REG_DEVICE_ID, 0x10 }, { TS3A227E_REG_INTERRUPT, 0x00 }, { TS3A227E_REG_KP_INTERRUPT, 0x00 }, { TS3A227E_REG_INTERRUPT_DISABLE, 0x08 }, { TS3A227E_REG_SETTING_1, 0x23 }, { TS3A227E_REG_SETTING_2, 0x00 }, { TS3A227E_REG_SETTING_3, 0x0e }, { TS3A227E_REG_SWITCH_CONTROL_1, 0x00 }, { TS3A227E_REG_SWITCH_CONTROL_2, 0x00 }, { TS3A227E_REG_SWITCH_STATUS_1, 0x0c }, { TS3A227E_REG_SWITCH_STATUS_2, 0x00 }, { TS3A227E_REG_ACCESSORY_STATUS, 0x00 }, { TS3A227E_REG_ADC_OUTPUT, 0x00 }, { TS3A227E_REG_KP_THRESHOLD_1, 0x20 }, { TS3A227E_REG_KP_THRESHOLD_2, 0x40 }, { TS3A227E_REG_KP_THRESHOLD_3, 0x68 }, }; static bool ts3a227e_readable_reg(struct device *dev, unsigned int reg) { switch (reg) { case TS3A227E_REG_DEVICE_ID ... TS3A227E_REG_KP_THRESHOLD_3: return true; default: return false; } } static bool ts3a227e_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { case TS3A227E_REG_INTERRUPT_DISABLE ... 
TS3A227E_REG_SWITCH_CONTROL_2: case TS3A227E_REG_KP_THRESHOLD_1 ... TS3A227E_REG_KP_THRESHOLD_3: return true; default: return false; } } static bool ts3a227e_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case TS3A227E_REG_INTERRUPT ... TS3A227E_REG_INTERRUPT_DISABLE: case TS3A227E_REG_SETTING_2: case TS3A227E_REG_SWITCH_STATUS_1 ... TS3A227E_REG_ADC_OUTPUT: return true; default: return false; } } static void ts3a227e_jack_report(struct ts3a227e *ts3a227e) { unsigned int i; int report = 0; if (!ts3a227e->jack) return; if (ts3a227e->plugged) report = SND_JACK_HEADPHONE; if (ts3a227e->mic_present) report |= SND_JACK_MICROPHONE; for (i = 0; i < TS3A227E_NUM_BUTTONS; i++) { if (ts3a227e->buttons_held & (1 << i)) report |= ts3a227e_buttons[i]; } snd_soc_jack_report(ts3a227e->jack, report, TS3A227E_JACK_MASK); } static void ts3a227e_new_jack_state(struct ts3a227e *ts3a227e, unsigned acc_reg) { bool plugged, mic_present; plugged = !!(acc_reg & JACK_INSERTED); mic_present = plugged && !!(acc_reg & EITHER_MIC_MASK); ts3a227e->plugged = plugged; if (mic_present != ts3a227e->mic_present) { ts3a227e->mic_present = mic_present; ts3a227e->buttons_held = 0; if (mic_present) { /* Enable key press detection. */ regmap_update_bits(ts3a227e->regmap, TS3A227E_REG_SETTING_2, KP_ENABLE, KP_ENABLE); } } } static irqreturn_t ts3a227e_interrupt(int irq, void *data) { struct ts3a227e *ts3a227e = (struct ts3a227e *)data; struct regmap *regmap = ts3a227e->regmap; unsigned int int_reg, kp_int_reg, acc_reg, i; struct device *dev = ts3a227e->dev; int ret; /* Check for plug/unplug. */ ret = regmap_read(regmap, TS3A227E_REG_INTERRUPT, &int_reg); if (ret) { dev_err(dev, "failed to clear interrupt ret=%d\n", ret); return IRQ_NONE; } if (int_reg & (DETECTION_COMPLETE_EVENT | INS_REM_EVENT)) { regmap_read(regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg); ts3a227e_new_jack_state(ts3a227e, acc_reg); } /* Report any key events. */ ret = regmap_read(regmap, TS3A227E_REG_KP_INTERRUPT, &kp_int_reg); if (ret) { dev_err(dev, "failed to clear key interrupt ret=%d\n", ret); return IRQ_NONE; } for (i = 0; i < TS3A227E_NUM_BUTTONS; i++) { if (kp_int_reg & PRESS_MASK(i)) ts3a227e->buttons_held |= (1 << i); if (kp_int_reg & RELEASE_MASK(i)) ts3a227e->buttons_held &= ~(1 << i); } ts3a227e_jack_report(ts3a227e); return IRQ_HANDLED; } /** * ts3a227e_enable_jack_detect - Specify a jack for event reporting * * @component: component to register the jack with * @jack: jack to use to report headset and button events on * * After this function has been called the headset insert/remove and button * events 0-3 will be routed to the given jack. Jack can be null to stop * reporting. 
*/ int ts3a227e_enable_jack_detect(struct snd_soc_component *component, struct snd_soc_jack *jack) { struct ts3a227e *ts3a227e = snd_soc_component_get_drvdata(component); snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA); snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); ts3a227e->jack = jack; ts3a227e_jack_report(ts3a227e); return 0; } EXPORT_SYMBOL_GPL(ts3a227e_enable_jack_detect); static struct snd_soc_component_driver ts3a227e_soc_driver; static const struct regmap_config ts3a227e_regmap_config = { .val_bits = 8, .reg_bits = 8, .max_register = TS3A227E_REG_KP_THRESHOLD_3, .readable_reg = ts3a227e_readable_reg, .writeable_reg = ts3a227e_writeable_reg, .volatile_reg = ts3a227e_volatile_reg, .cache_type = REGCACHE_RBTREE, .reg_defaults = ts3a227e_reg_defaults, .num_reg_defaults = ARRAY_SIZE(ts3a227e_reg_defaults), }; static int ts3a227e_parse_device_property(struct ts3a227e *ts3a227e, struct device *dev) { u32 micbias; int err; err = device_property_read_u32(dev, "ti,micbias", &micbias); if (!err) { regmap_update_bits(ts3a227e->regmap, TS3A227E_REG_SETTING_3, MICBIAS_SETTING_MASK, (micbias & 0x07) << MICBIAS_SETTING_SFT); } return 0; } static int ts3a227e_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct ts3a227e *ts3a227e; struct device *dev = &i2c->dev; int ret; unsigned int acc_reg; ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL); if (ts3a227e == NULL) return -ENOMEM; i2c_set_clientdata(i2c, ts3a227e); ts3a227e->dev = dev; ts3a227e->irq = i2c->irq; ts3a227e->regmap = devm_regmap_init_i2c(i2c, &ts3a227e_regmap_config); if (IS_ERR(ts3a227e->regmap)) return PTR_ERR(ts3a227e->regmap); ret = ts3a227e_parse_device_property(ts3a227e, dev); if (ret) { dev_err(dev, "Failed to parse device property: %d\n", ret); return ret; } ret = devm_request_threaded_irq(dev, i2c->irq, NULL, ts3a227e_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "TS3A227E", ts3a227e); if (ret) { dev_err(dev, "Cannot request irq %d (%d)\n", i2c->irq, ret); return ret; } ret = devm_snd_soc_register_component(&i2c->dev, &ts3a227e_soc_driver, NULL, 0); if (ret) return ret; /* Enable interrupts except for ADC complete. */ regmap_update_bits(ts3a227e->regmap, TS3A227E_REG_INTERRUPT_DISABLE, INTB_DISABLE | ADC_COMPLETE_INT_DISABLE, ADC_COMPLETE_INT_DISABLE); /* Read jack status because chip might not trigger interrupt at boot. 
*/ regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg); ts3a227e_new_jack_state(ts3a227e, acc_reg); ts3a227e_jack_report(ts3a227e); return 0; } #ifdef CONFIG_PM_SLEEP static int ts3a227e_suspend(struct device *dev) { struct ts3a227e *ts3a227e = dev_get_drvdata(dev); dev_dbg(ts3a227e->dev, "suspend disable irq\n"); disable_irq(ts3a227e->irq); return 0; } static int ts3a227e_resume(struct device *dev) { struct ts3a227e *ts3a227e = dev_get_drvdata(dev); dev_dbg(ts3a227e->dev, "resume enable irq\n"); enable_irq(ts3a227e->irq); return 0; } #endif static const struct dev_pm_ops ts3a227e_pm = { SET_SYSTEM_SLEEP_PM_OPS(ts3a227e_suspend, ts3a227e_resume) }; static const struct i2c_device_id ts3a227e_i2c_ids[] = { { "ts3a227e", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ts3a227e_i2c_ids); static const struct of_device_id ts3a227e_of_match[] = { { .compatible = "ti,ts3a227e", }, { } }; MODULE_DEVICE_TABLE(of, ts3a227e_of_match); static struct i2c_driver ts3a227e_driver = { .driver = { .name = "ts3a227e", .pm = &ts3a227e_pm, .of_match_table = of_match_ptr(ts3a227e_of_match), }, .probe = ts3a227e_i2c_probe, .id_table = ts3a227e_i2c_ids, }; module_i2c_driver(ts3a227e_driver); MODULE_DESCRIPTION("ASoC ts3a227e driver"); MODULE_AUTHOR("Dylan Reid <dgreid@chromium.org>"); MODULE_LICENSE("GPL v2");
gpl-2.0
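The interrupt handler in ts3a227e.c above decodes the key-press interrupt register one button at a time: each button owns a press/release bit pair, and the driver keeps a per-button "held" bitmask that it later reports through snd_soc_jack_report(). Below is a compilable sketch of just that bookkeeping, assuming nothing beyond the PRESS_MASK()/RELEASE_MASK() definitions taken from the driver; update_buttons_held() is a name invented for this sketch.

#include <stdio.h>

#define TS3A227E_NUM_BUTTONS 4
#define PRESS_MASK(idx)   (0x01 << (2 * (idx)))
#define RELEASE_MASK(idx) (0x02 << (2 * (idx)))

/* Standalone sketch of the button bookkeeping in ts3a227e_interrupt():
 * set a button's bit in the held mask on its press event, clear it on
 * its release event.
 */
static unsigned int update_buttons_held(unsigned int held, unsigned int kp_int_reg)
{
	unsigned int i;

	for (i = 0; i < TS3A227E_NUM_BUTTONS; i++) {
		if (kp_int_reg & PRESS_MASK(i))
			held |= (1u << i);
		if (kp_int_reg & RELEASE_MASK(i))
			held &= ~(1u << i);
	}
	return held;
}

int main(void)
{
	unsigned int held = 0;

	held = update_buttons_held(held, 0x01); /* button 0 pressed  -> 0x1 */
	held = update_buttons_held(held, 0x04); /* button 1 pressed  -> 0x3 */
	held = update_buttons_held(held, 0x02); /* button 0 released -> 0x2 */
	printf("held mask = 0x%x\n", held);
	return 0;
}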
matianfu/kunlun-kernel
drivers/net/pppoe.c
759
28154
/** -*- linux-c -*- *********************************************************** * Linux PPP over Ethernet (PPPoX/PPPoE) Sockets * * PPPoX --- Generic PPP encapsulation socket family * PPPoE --- PPP over Ethernet (RFC 2516) * * * Version: 0.7.0 * * 070228 : Fix to allow multiple sessions with same remote MAC and same * session id by including the local device ifindex in the * tuple identifying a session. This also ensures packets can't * be injected into a session from interfaces other than the one * specified by userspace. Florian Zumbiehl <florz@florz.de> * (Oh, BTW, this one is YYMMDD, in case you were wondering ...) * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme * 030700 : Fixed connect logic to allow for disconnect. * 270700 : Fixed potential SMP problems; we must protect against * simultaneous invocation of ppp_input * and ppp_unregister_channel. * 040800 : Respect reference count mechanisms on net-devices. * 200800 : fix kfree(skb) in pppoe_rcv (acme) * Module reference count is decremented in the right spot now, * guards against sock_put not actually freeing the sk * in pppoe_release. * 051000 : Initialization cleanup. * 111100 : Fix recvmsg. * 050101 : Fix PADT procesing. * 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey) * 170701 : Do not lock_sock with rwlock held. (DaveM) * Ignore discovery frames if user has socket * locked. (DaveM) * Ignore return value of dev_queue_xmit in __pppoe_xmit * or else we may kfree an SKB twice. (DaveM) * 190701 : When doing copies of skb's in __pppoe_xmit, always delete * the original skb that was passed in on success, never on * failure. Delete the copy of the skb on failure to avoid * a memory leak. * 081001 : Misc. cleanup (licence string, non-blocking, prevent * reference of device on close). * 121301 : New ppp channels interface; cannot unregister a channel * from interrupts. Thus, we mark the socket as a ZOMBIE * and do the unregistration later. * 081002 : seq_file support for proc stuff -acme * 111602 : Merge all 2.4 fixes into 2.5/2.6 tree. Label 2.5/2.6 * as version 0.7. Spacing cleanup. * Author: Michal Ostrowski <mostrows@speakeasy.net> * Contributors: * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * David S. Miller (davem@redhat.com) * * License: * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
* */ #include <linux/string.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/net.h> #include <linux/inetdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/if_ether.h> #include <linux/if_pppox.h> #include <linux/ppp_channel.h> #include <linux/ppp_defs.h> #include <linux/if_ppp.h> #include <linux/notifier.h> #include <linux/file.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/nsproxy.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/sock.h> #include <asm/uaccess.h> #define PPPOE_HASH_BITS 4 #define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) #define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb); static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); static const struct proto_ops pppoe_ops; static struct ppp_channel_ops pppoe_chan_ops; /* per-net private data for this module */ static int pppoe_net_id __read_mostly; struct pppoe_net { /* * we could use _single_ hash table for all * nets by injecting net id into the hash but * it would increase hash chains and add * a few additional math comparations messy * as well, moreover in case of SMP less locking * controversy here */ struct pppox_sock *hash_table[PPPOE_HASH_SIZE]; rwlock_t hash_lock; }; /* * PPPoE could be in the following stages: * 1) Discovery stage (to obtain remote MAC and Session ID) * 2) Session stage (MAC and SID are known) * * Ethernet frames have a special tag for this but * we use simplier approach based on session id */ static inline bool stage_session(__be16 sid) { return sid != 0; } static inline struct pppoe_net *pppoe_pernet(struct net *net) { BUG_ON(!net); return net_generic(net, pppoe_net_id); } static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) { return a->sid == b->sid && !memcmp(a->remote, b->remote, ETH_ALEN); } static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) { return a->sid == sid && !memcmp(a->remote, addr, ETH_ALEN); } #if 8 % PPPOE_HASH_BITS #error 8 must be a multiple of PPPOE_HASH_BITS #endif static int hash_item(__be16 sid, unsigned char *addr) { unsigned char hash = 0; unsigned int i; for (i = 0; i < ETH_ALEN; i++) hash ^= addr[i]; for (i = 0; i < sizeof(sid_t) * 8; i += 8) hash ^= (__force __u32)sid >> i; for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;) hash ^= hash >> i; return hash & PPPOE_HASH_MASK; } /********************************************************************** * * Set/get/delete/rehash items (internal versions) * **********************************************************************/ static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid, unsigned char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret; ret = pn->hash_table[hash]; while (ret) { if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) return ret; ret = ret->next; } return NULL; } static int __set_item(struct pppoe_net *pn, struct pppox_sock *po) { int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); struct pppox_sock *ret; ret = pn->hash_table[hash]; while (ret) { if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex) return -EALREADY; ret = ret->next; } po->next = pn->hash_table[hash]; pn->hash_table[hash] = po; return 0; } static struct pppox_sock *__delete_item(struct pppoe_net *pn, __be16 sid, char *addr, int 
ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret, **src; ret = pn->hash_table[hash]; src = &pn->hash_table[hash]; while (ret) { if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) { *src = ret->next; break; } src = &ret->next; ret = ret->next; } return ret; } /********************************************************************** * * Set/get/delete/rehash items * **********************************************************************/ static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid, unsigned char *addr, int ifindex) { struct pppox_sock *po; read_lock_bh(&pn->hash_lock); po = __get_item(pn, sid, addr, ifindex); if (po) sock_hold(sk_pppox(po)); read_unlock_bh(&pn->hash_lock); return po; } static inline struct pppox_sock *get_item_by_addr(struct net *net, struct sockaddr_pppox *sp) { struct net_device *dev; struct pppoe_net *pn; struct pppox_sock *pppox_sock = NULL; int ifindex; rcu_read_lock(); dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev); if (dev) { ifindex = dev->ifindex; pn = pppoe_pernet(net); pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); } rcu_read_unlock(); return pppox_sock; } static inline struct pppox_sock *delete_item(struct pppoe_net *pn, __be16 sid, char *addr, int ifindex) { struct pppox_sock *ret; write_lock_bh(&pn->hash_lock); ret = __delete_item(pn, sid, addr, ifindex); write_unlock_bh(&pn->hash_lock); return ret; } /*************************************************************************** * * Handler for device events. * Certain device events require that sockets be unconnected. * **************************************************************************/ static void pppoe_flush_dev(struct net_device *dev) { struct pppoe_net *pn; int i; pn = pppoe_pernet(dev_net(dev)); write_lock_bh(&pn->hash_lock); for (i = 0; i < PPPOE_HASH_SIZE; i++) { struct pppox_sock *po = pn->hash_table[i]; struct sock *sk; while (po) { while (po && po->pppoe_dev != dev) { po = po->next; } if (!po) break; sk = sk_pppox(po); /* We always grab the socket lock, followed by the * hash_lock, in that order. Since we should hold the * sock lock while doing any unbinding, we need to * release the lock we're holding. Hold a reference to * the sock so it doesn't disappear as we're jumping * between locks. */ sock_hold(sk); write_unlock_bh(&pn->hash_lock); lock_sock(sk); if (po->pppoe_dev == dev && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { pppox_unbind_sock(sk); sk->sk_state = PPPOX_ZOMBIE; sk->sk_state_change(sk); po->pppoe_dev = NULL; dev_put(dev); } release_sock(sk); sock_put(sk); /* Restart the process from the start of the current * hash chain. We dropped locks so the world may have * change from underneath us. */ BUG_ON(pppoe_pernet(dev_net(dev)) == NULL); write_lock_bh(&pn->hash_lock); po = pn->hash_table[i]; } } write_unlock_bh(&pn->hash_lock); } static int pppoe_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; /* Only look at sockets that are using this specific device. */ switch (event) { case NETDEV_CHANGEMTU: /* A change in mtu is a bad thing, requiring * LCP re-negotiation. */ case NETDEV_GOING_DOWN: case NETDEV_DOWN: /* Find every socket on this device and kill it. 
*/ pppoe_flush_dev(dev); break; default: break; } return NOTIFY_DONE; } static struct notifier_block pppoe_notifier = { .notifier_call = pppoe_device_event, }; /************************************************************************ * * Do the real work of receiving a PPPoE Session frame. * ***********************************************************************/ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) { struct pppox_sock *po = pppox_sk(sk); struct pppox_sock *relay_po; /* Backlog receive. Semantics of backlog rcv preclude any code from * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state * can't change. */ if (sk->sk_state & PPPOX_BOUND) { ppp_input(&po->chan, skb); } else if (sk->sk_state & PPPOX_RELAY) { relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay); if (relay_po == NULL) goto abort_kfree; if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0) goto abort_put; if (!__pppoe_xmit(sk_pppox(relay_po), skb)) goto abort_put; } else { if (sock_queue_rcv_skb(sk, skb)) goto abort_kfree; } return NET_RX_SUCCESS; abort_put: sock_put(sk_pppox(relay_po)); abort_kfree: kfree_skb(skb); return NET_RX_DROP; } /************************************************************************ * * Receive wrapper called in BH context. * ***********************************************************************/ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct pppoe_hdr *ph; struct pppox_sock *po; struct pppoe_net *pn; int len; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto out; if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto drop; ph = pppoe_hdr(skb); len = ntohs(ph->length); skb_pull_rcsum(skb, sizeof(*ph)); if (skb->len < len) goto drop; if (pskb_trim_rcsum(skb, len)) goto drop; pn = pppoe_pernet(dev_net(dev)); /* Note that get_item does a sock_hold(), so sk_pppox(po) * is known to be safe. */ po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (!po) goto drop; return sk_receive_skb(sk_pppox(po), skb, 0); drop: kfree_skb(skb); out: return NET_RX_DROP; } /************************************************************************ * * Receive a PPPoE Discovery frame. * This is solely for detection of PADT frames * ***********************************************************************/ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct pppoe_hdr *ph; struct pppox_sock *po; struct pppoe_net *pn; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto out; if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto abort; ph = pppoe_hdr(skb); if (ph->code != PADT_CODE) goto abort; pn = pppoe_pernet(dev_net(dev)); po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (po) { struct sock *sk = sk_pppox(po); bh_lock_sock(sk); /* If the user has locked the socket, just ignore * the packet. With the way two rcv protocols hook into * one socket family type, we cannot (easily) distinguish * what kind of SKB it is during backlog rcv. */ if (sock_owned_by_user(sk) == 0) { /* We're no longer connect at the PPPOE layer, * and must wait for ppp channel to disconnect us. */ sk->sk_state = PPPOX_ZOMBIE; } bh_unlock_sock(sk); sock_put(sk); } abort: kfree_skb(skb); out: return NET_RX_SUCCESS; /* Lies... 
:-) */ } static struct packet_type pppoes_ptype __read_mostly = { .type = cpu_to_be16(ETH_P_PPP_SES), .func = pppoe_rcv, }; static struct packet_type pppoed_ptype __read_mostly = { .type = cpu_to_be16(ETH_P_PPP_DISC), .func = pppoe_disc_rcv, }; static struct proto pppoe_sk_proto __read_mostly = { .name = "PPPOE", .owner = THIS_MODULE, .obj_size = sizeof(struct pppox_sock), }; /*********************************************************************** * * Initialize a new struct sock. * **********************************************************************/ static int pppoe_create(struct net *net, struct socket *sock) { struct sock *sk; sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->state = SS_UNCONNECTED; sock->ops = &pppoe_ops; sk->sk_backlog_rcv = pppoe_rcv_core; sk->sk_state = PPPOX_NONE; sk->sk_type = SOCK_STREAM; sk->sk_family = PF_PPPOX; sk->sk_protocol = PX_PROTO_OE; return 0; } static int pppoe_release(struct socket *sock) { struct sock *sk = sock->sk; struct pppox_sock *po; struct pppoe_net *pn; struct net *net = NULL; if (!sk) return 0; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD)) { release_sock(sk); return -EBADF; } po = pppox_sk(sk); if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } pppox_unbind_sock(sk); /* Signal the death of the socket. */ sk->sk_state = PPPOX_DEAD; net = sock_net(sk); pn = pppoe_pernet(net); /* * protect "po" from concurrent updates * on pppoe_flush_dev */ delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); sock_orphan(sk); sock->sk = NULL; skb_queue_purge(&sk->sk_receive_queue); release_sock(sk); sock_put(sk); return 0; } static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; struct pppox_sock *po = pppox_sk(sk); struct net_device *dev = NULL; struct pppoe_net *pn; struct net *net = NULL; int error; lock_sock(sk); error = -EINVAL; if (sp->sa_protocol != PX_PROTO_OE) goto end; /* Check for already bound sockets */ error = -EBUSY; if ((sk->sk_state & PPPOX_CONNECTED) && stage_session(sp->sa_addr.pppoe.sid)) goto end; /* Check for already disconnected sockets, on attempts to disconnect */ error = -EALREADY; if ((sk->sk_state & PPPOX_DEAD) && !stage_session(sp->sa_addr.pppoe.sid)) goto end; error = 0; /* Delete the old binding */ if (stage_session(po->pppoe_pa.sid)) { pppox_unbind_sock(sk); pn = pppoe_pernet(sock_net(sk)); delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } memset(sk_pppox(po) + 1, 0, sizeof(struct pppox_sock) - sizeof(struct sock)); sk->sk_state = PPPOX_NONE; } /* Re-bind in session stage only */ if (stage_session(sp->sa_addr.pppoe.sid)) { error = -ENODEV; net = sock_net(sk); dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev); if (!dev) goto err_put; po->pppoe_dev = dev; po->pppoe_ifindex = dev->ifindex; pn = pppoe_pernet(net); if (!(dev->flags & IFF_UP)) { goto err_put; } memcpy(&po->pppoe_pa, &sp->sa_addr.pppoe, sizeof(struct pppoe_addr)); write_lock_bh(&pn->hash_lock); error = __set_item(pn, po); write_unlock_bh(&pn->hash_lock); if (error < 0) goto err_put; po->chan.hdrlen = (sizeof(struct pppoe_hdr) + dev->hard_header_len); po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr); po->chan.private = sk; po->chan.ops = &pppoe_chan_ops; error = 
ppp_register_net_channel(dev_net(dev), &po->chan); if (error) { delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); goto err_put; } sk->sk_state = PPPOX_CONNECTED; } po->num = sp->sa_addr.pppoe.sid; end: release_sock(sk); return error; err_put: if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } goto end; } static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, int *usockaddr_len, int peer) { int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_OE; memcpy(&sp.sa_addr.pppoe, &pppox_sk(sock->sk)->pppoe_pa, sizeof(struct pppoe_addr)); memcpy(uaddr, &sp, len); *usockaddr_len = len; return 0; } static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); int val; int err; switch (cmd) { case PPPIOCGMRU: err = -ENXIO; if (!(sk->sk_state & PPPOX_CONNECTED)) break; err = -EFAULT; if (put_user(po->pppoe_dev->mtu - sizeof(struct pppoe_hdr) - PPP_HDRLEN, (int __user *)arg)) break; err = 0; break; case PPPIOCSMRU: err = -ENXIO; if (!(sk->sk_state & PPPOX_CONNECTED)) break; err = -EFAULT; if (get_user(val, (int __user *)arg)) break; if (val < (po->pppoe_dev->mtu - sizeof(struct pppoe_hdr) - PPP_HDRLEN)) err = 0; else err = -EINVAL; break; case PPPIOCSFLAGS: err = -EFAULT; if (get_user(val, (int __user *)arg)) break; err = 0; break; case PPPOEIOCSFWD: { struct pppox_sock *relay_po; err = -EBUSY; if (sk->sk_state & (PPPOX_BOUND | PPPOX_ZOMBIE | PPPOX_DEAD)) break; err = -ENOTCONN; if (!(sk->sk_state & PPPOX_CONNECTED)) break; /* PPPoE address from the user specifies an outbound PPPoE address which frames are forwarded to */ err = -EFAULT; if (copy_from_user(&po->pppoe_relay, (void __user *)arg, sizeof(struct sockaddr_pppox))) break; err = -EINVAL; if (po->pppoe_relay.sa_family != AF_PPPOX || po->pppoe_relay.sa_protocol != PX_PROTO_OE) break; /* Check that the socket referenced by the address actually exists. */ relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay); if (!relay_po) break; sock_put(sk_pppox(relay_po)); sk->sk_state |= PPPOX_RELAY; err = 0; break; } case PPPOEIOCDFWD: err = -EALREADY; if (!(sk->sk_state & PPPOX_RELAY)) break; sk->sk_state &= ~PPPOX_RELAY; err = 0; break; default: err = -ENOTTY; } return err; } static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len) { struct sk_buff *skb; struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); int error; struct pppoe_hdr hdr; struct pppoe_hdr *ph; struct net_device *dev; char *start; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { error = -ENOTCONN; goto end; } hdr.ver = 1; hdr.type = 1; hdr.code = 0; hdr.sid = po->num; dev = po->pppoe_dev; error = -EMSGSIZE; if (total_len > (dev->mtu + dev->hard_header_len)) goto end; skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32, 0, GFP_KERNEL); if (!skb) { error = -ENOMEM; goto end; } /* Reserve space for headers. 
*/ skb_reserve(skb, dev->hard_header_len); skb_reset_network_header(skb); skb->dev = dev; skb->priority = sk->sk_priority; skb->protocol = cpu_to_be16(ETH_P_PPP_SES); ph = (struct pppoe_hdr *)skb_put(skb, total_len + sizeof(struct pppoe_hdr)); start = (char *)&ph->tag[0]; error = memcpy_fromiovec(start, m->msg_iov, total_len); if (error < 0) { kfree_skb(skb); goto end; } error = total_len; dev_hard_header(skb, dev, ETH_P_PPP_SES, po->pppoe_pa.remote, NULL, total_len); memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); ph->length = htons(total_len); dev_queue_xmit(skb); end: release_sock(sk); return error; } /************************************************************************ * * xmit function for internal use. * ***********************************************************************/ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) { struct pppox_sock *po = pppox_sk(sk); struct net_device *dev = po->pppoe_dev; struct pppoe_hdr *ph; int data_len = skb->len; /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP * xmit operations conclude prior to an unregistration call. Thus * sk->sk_state cannot change, so we don't need to do lock_sock(). * But, we also can't do a lock_sock since that introduces a potential * deadlock as we'd reverse the lock ordering used when calling * ppp_unregister_channel(). */ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto abort; if (!dev) goto abort; /* Copy the data if there is no space for the header or if it's * read-only. */ if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len)) goto abort; __skb_push(skb, sizeof(*ph)); skb_reset_network_header(skb); ph = pppoe_hdr(skb); ph->ver = 1; ph->type = 1; ph->code = 0; ph->sid = po->num; ph->length = htons(data_len); skb->protocol = cpu_to_be16(ETH_P_PPP_SES); skb->dev = dev; dev_hard_header(skb, dev, ETH_P_PPP_SES, po->pppoe_pa.remote, NULL, data_len); dev_queue_xmit(skb); return 1; abort: kfree_skb(skb); return 1; } /************************************************************************ * * xmit function called by generic PPP driver * sends PPP frame over PPPoE socket * ***********************************************************************/ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) { struct sock *sk = (struct sock *)chan->private; return __pppoe_xmit(sk, skb); } static struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, }; static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; if (sk->sk_state & PPPOX_BOUND) { error = -EIO; goto end; } skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &error); if (error < 0) goto end; m->msg_namelen = 0; if (skb) { total_len = min_t(size_t, total_len, skb->len); error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); if (error == 0) error = total_len; } kfree_skb(skb); end: return error; } #ifdef CONFIG_PROC_FS static int pppoe_seq_show(struct seq_file *seq, void *v) { struct pppox_sock *po; char *dev_name; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Id Address Device\n"); goto out; } po = v; dev_name = po->pppoe_pa.dev; seq_printf(seq, "%08X %pM %8s\n", po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name); out: return 0; } static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos) { struct pppox_sock *po; int i; for (i = 0; i < PPPOE_HASH_SIZE; i++) { po = pn->hash_table[i]; while (po) { if (!pos--) goto out; po = 
po->next; } } out: return po; } static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos) __acquires(pn->hash_lock) { struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); loff_t l = *pos; read_lock_bh(&pn->hash_lock); return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN; } static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); struct pppox_sock *po; ++*pos; if (v == SEQ_START_TOKEN) { po = pppoe_get_idx(pn, 0); goto out; } po = v; if (po->next) po = po->next; else { int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); po = NULL; while (++hash < PPPOE_HASH_SIZE) { po = pn->hash_table[hash]; if (po) break; } } out: return po; } static void pppoe_seq_stop(struct seq_file *seq, void *v) __releases(pn->hash_lock) { struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); read_unlock_bh(&pn->hash_lock); } static const struct seq_operations pppoe_seq_ops = { .start = pppoe_seq_start, .next = pppoe_seq_next, .stop = pppoe_seq_stop, .show = pppoe_seq_show, }; static int pppoe_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &pppoe_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations pppoe_seq_fops = { .owner = THIS_MODULE, .open = pppoe_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif /* CONFIG_PROC_FS */ static const struct proto_ops pppoe_ops = { .family = AF_PPPOX, .owner = THIS_MODULE, .release = pppoe_release, .bind = sock_no_bind, .connect = pppoe_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pppoe_getname, .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = pppoe_sendmsg, .recvmsg = pppoe_recvmsg, .mmap = sock_no_mmap, .ioctl = pppox_ioctl, }; static struct pppox_proto pppoe_proto = { .create = pppoe_create, .ioctl = pppoe_ioctl, .owner = THIS_MODULE, }; static __net_init int pppoe_init_net(struct net *net) { struct pppoe_net *pn = pppoe_pernet(net); struct proc_dir_entry *pde; rwlock_init(&pn->hash_lock); pde = proc_net_fops_create(net, "pppoe", S_IRUGO, &pppoe_seq_fops); #ifdef CONFIG_PROC_FS if (!pde) return -ENOMEM; #endif return 0; } static __net_exit void pppoe_exit_net(struct net *net) { proc_net_remove(net, "pppoe"); } static struct pernet_operations pppoe_net_ops = { .init = pppoe_init_net, .exit = pppoe_exit_net, .id = &pppoe_net_id, .size = sizeof(struct pppoe_net), }; static int __init pppoe_init(void) { int err; err = register_pernet_device(&pppoe_net_ops); if (err) goto out; err = proto_register(&pppoe_sk_proto, 0); if (err) goto out_unregister_net_ops; err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto); if (err) goto out_unregister_pppoe_proto; dev_add_pack(&pppoes_ptype); dev_add_pack(&pppoed_ptype); register_netdevice_notifier(&pppoe_notifier); return 0; out_unregister_pppoe_proto: proto_unregister(&pppoe_sk_proto); out_unregister_net_ops: unregister_pernet_device(&pppoe_net_ops); out: return err; } static void __exit pppoe_exit(void) { unregister_netdevice_notifier(&pppoe_notifier); dev_remove_pack(&pppoed_ptype); dev_remove_pack(&pppoes_ptype); unregister_pppox_proto(PX_PROTO_OE); proto_unregister(&pppoe_sk_proto); unregister_pernet_device(&pppoe_net_ops); } module_init(pppoe_init); module_exit(pppoe_exit); MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>"); MODULE_DESCRIPTION("PPP over Ethernet driver"); MODULE_LICENSE("GPL"); 
MODULE_ALIAS_NETPROTO(PF_PPPOX);
gpl-2.0
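hash_item() in the pppoe driver above folds the peer MAC address and the 16-bit session id into a PPPOE_HASH_BITS-wide bucket index by XOR-ing all the bytes together and then repeatedly halving the width until it fits. The sketch below restates that idea in plain userspace C; it treats the session id as a host-order uint16_t, whereas the driver works on a big-endian __be16, so this is an illustration of the folding scheme rather than a bit-exact copy, and pppoe_hash_sketch() is a name local to this example.

#include <stdio.h>
#include <stdint.h>

#define PPPOE_HASH_BITS 4
#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
#define ETH_ALEN 6

/* Illustrative restatement of hash_item(): XOR every byte of the peer MAC
 * and of the 16-bit session id together, then fold the resulting byte down
 * to PPPOE_HASH_BITS bits.
 */
static unsigned int pppoe_hash_sketch(uint16_t sid, const uint8_t addr[ETH_ALEN])
{
	unsigned int hash = 0;
	unsigned int i;

	for (i = 0; i < ETH_ALEN; i++)
		hash ^= addr[i];
	hash ^= sid & 0xff;        /* low byte of the session id  */
	hash ^= (sid >> 8) & 0xff; /* high byte of the session id */

	/* fold the remaining 8 bits down to PPPOE_HASH_BITS bits */
	for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;)
		hash ^= hash >> i;

	return hash & PPPOE_HASH_MASK;
}

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("bucket = %u\n", pppoe_hash_sketch(0x1234, mac));
	return 0;
}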
Bdaman80/BD-Ace
net/ipv4/inet_connection_sock.c
759
21124
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Support for INET connection oriented protocols. * * Authors: See the TCP sources * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or(at your option) any later version. */ #include <linux/module.h> #include <linux/jhash.h> #include <net/inet_connection_sock.h> #include <net/inet_hashtables.h> #include <net/inet_timewait_sock.h> #include <net/ip.h> #include <net/route.h> #include <net/tcp_states.h> #include <net/xfrm.h> #ifdef INET_CSK_DEBUG const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n"; EXPORT_SYMBOL(inet_csk_timer_bug_msg); #endif /* * This struct holds the first and last local port number. */ struct local_ports sysctl_local_ports __read_mostly = { .lock = SEQLOCK_UNLOCKED, .range = { 32768, 61000 }, }; unsigned long *sysctl_local_reserved_ports; EXPORT_SYMBOL(sysctl_local_reserved_ports); void inet_get_local_port_range(int *low, int *high) { unsigned seq; do { seq = read_seqbegin(&sysctl_local_ports.lock); *low = sysctl_local_ports.range[0]; *high = sysctl_local_ports.range[1]; } while (read_seqretry(&sysctl_local_ports.lock, seq)); } EXPORT_SYMBOL(inet_get_local_port_range); int inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb) { const __be32 sk_rcv_saddr = inet_rcv_saddr(sk); struct sock *sk2; struct hlist_node *node; int reuse = sk->sk_reuse; /* * Unlike other sk lookup places we do not check * for sk_net here, since _all_ the socks listed * in tb->owners list belong to the same net - the * one this bucket belongs to. */ sk_for_each_bound(sk2, node, &tb->owners) { if (sk != sk2 && !inet_v6_ipv6only(sk2) && (!sk->sk_bound_dev_if || !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { if (!reuse || !sk2->sk_reuse || sk2->sk_state == TCP_LISTEN) { const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2); if (!sk2_rcv_saddr || !sk_rcv_saddr || sk2_rcv_saddr == sk_rcv_saddr) break; } } } return node != NULL; } EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); /* Obtain a reference to a local port for the given sock, * if snum is zero it means select any available local port. 
*/ int inet_csk_get_port(struct sock *sk, unsigned short snum) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct inet_bind_hashbucket *head; struct hlist_node *node; struct inet_bind_bucket *tb; int ret, attempts = 5; struct net *net = sock_net(sk); int smallest_size = -1, smallest_rover; local_bh_disable(); if (!snum) { int remaining, rover, low, high; again: inet_get_local_port_range(&low, &high); remaining = (high - low) + 1; smallest_rover = rover = net_random() % remaining + low; smallest_size = -1; do { if (inet_is_reserved_local_port(rover)) goto next_nolock; head = &hashinfo->bhash[inet_bhashfn(net, rover, hashinfo->bhash_size)]; spin_lock(&head->lock); inet_bind_bucket_for_each(tb, node, &head->chain) if (net_eq(ib_net(tb), net) && tb->port == rover) { if (tb->fastreuse > 0 && sk->sk_reuse && sk->sk_state != TCP_LISTEN && (tb->num_owners < smallest_size || smallest_size == -1)) { smallest_size = tb->num_owners; smallest_rover = rover; if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { spin_unlock(&head->lock); snum = smallest_rover; goto have_snum; } } goto next; } break; next: spin_unlock(&head->lock); next_nolock: if (++rover > high) rover = low; } while (--remaining > 0); /* Exhausted local port range during search? It is not * possible for us to be holding one of the bind hash * locks if this test triggers, because if 'remaining' * drops to zero, we broke out of the do/while loop at * the top level, not from the 'break;' statement. */ ret = 1; if (remaining <= 0) { if (smallest_size != -1) { snum = smallest_rover; goto have_snum; } goto fail; } /* OK, here is the one we will use. HEAD is * non-NULL and we hold it's mutex. */ snum = rover; } else { have_snum: head = &hashinfo->bhash[inet_bhashfn(net, snum, hashinfo->bhash_size)]; spin_lock(&head->lock); inet_bind_bucket_for_each(tb, node, &head->chain) if (net_eq(ib_net(tb), net) && tb->port == snum) goto tb_found; } tb = NULL; goto tb_not_found; tb_found: if (!hlist_empty(&tb->owners)) { if (tb->fastreuse > 0 && sk->sk_reuse && sk->sk_state != TCP_LISTEN && smallest_size == -1) { goto success; } else { ret = 1; if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { if (sk->sk_reuse && sk->sk_state != TCP_LISTEN && smallest_size != -1 && --attempts >= 0) { spin_unlock(&head->lock); goto again; } goto fail_unlock; } } } tb_not_found: ret = 1; if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, net, head, snum)) == NULL) goto fail_unlock; if (hlist_empty(&tb->owners)) { if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) tb->fastreuse = 1; else tb->fastreuse = 0; } else if (tb->fastreuse && (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) tb->fastreuse = 0; success: if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, snum); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); ret = 0; fail_unlock: spin_unlock(&head->lock); fail: local_bh_enable(); return ret; } EXPORT_SYMBOL_GPL(inet_csk_get_port); /* * Wait for an incoming connection, avoid race conditions. This must be called * with the socket locked. */ static int inet_csk_wait_for_connect(struct sock *sk, long timeo) { struct inet_connection_sock *icsk = inet_csk(sk); DEFINE_WAIT(wait); int err; /* * True wake-one mechanism for incoming connections: only * one process gets woken up, not the 'whole herd'. * Since we do not 'race & poll' for established sockets * anymore, the common case will execute the loop only once. 
* * Subtle issue: "add_wait_queue_exclusive()" will be added * after any current non-exclusive waiters, and we know that * it will always _stay_ after any new non-exclusive waiters * because all non-exclusive waiters are added at the * beginning of the wait-queue. As such, it's ok to "drop" * our exclusiveness temporarily when we get woken up without * having to remove and re-insert us on the wait queue. */ for (;;) { prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); release_sock(sk); if (reqsk_queue_empty(&icsk->icsk_accept_queue)) timeo = schedule_timeout(timeo); lock_sock(sk); err = 0; if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) break; err = -EINVAL; if (sk->sk_state != TCP_LISTEN) break; err = sock_intr_errno(timeo); if (signal_pending(current)) break; err = -EAGAIN; if (!timeo) break; } finish_wait(sk_sleep(sk), &wait); return err; } /* * This will accept the next outstanding connection. */ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) { struct inet_connection_sock *icsk = inet_csk(sk); struct sock *newsk; int error; lock_sock(sk); /* We need to make sure that this socket is listening, * and that it has something pending. */ error = -EINVAL; if (sk->sk_state != TCP_LISTEN) goto out_err; /* Find already established connection */ if (reqsk_queue_empty(&icsk->icsk_accept_queue)) { long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); /* If this is a non blocking socket don't sleep */ error = -EAGAIN; if (!timeo) goto out_err; error = inet_csk_wait_for_connect(sk, timeo); if (error) goto out_err; } newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk); WARN_ON(newsk->sk_state == TCP_SYN_RECV); out: release_sock(sk); return newsk; out_err: newsk = NULL; *err = error; goto out; } EXPORT_SYMBOL(inet_csk_accept); /* * Using different timers for retransmit, delayed acks and probes * We may wish use just one timer maintaining a list of expire jiffies * to optimize. */ void inet_csk_init_xmit_timers(struct sock *sk, void (*retransmit_handler)(unsigned long), void (*delack_handler)(unsigned long), void (*keepalive_handler)(unsigned long)) { struct inet_connection_sock *icsk = inet_csk(sk); setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler, (unsigned long)sk); setup_timer(&icsk->icsk_delack_timer, delack_handler, (unsigned long)sk); setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk); icsk->icsk_pending = icsk->icsk_ack.pending = 0; } EXPORT_SYMBOL(inet_csk_init_xmit_timers); void inet_csk_clear_xmit_timers(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0; sk_stop_timer(sk, &icsk->icsk_retransmit_timer); sk_stop_timer(sk, &icsk->icsk_delack_timer); sk_stop_timer(sk, &sk->sk_timer); } EXPORT_SYMBOL(inet_csk_clear_xmit_timers); void inet_csk_delete_keepalive_timer(struct sock *sk) { sk_stop_timer(sk, &sk->sk_timer); } EXPORT_SYMBOL(inet_csk_delete_keepalive_timer); void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) { sk_reset_timer(sk, &sk->sk_timer, jiffies + len); } EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); struct dst_entry *inet_csk_route_req(struct sock *sk, const struct request_sock *req) { struct rtable *rt; const struct inet_request_sock *ireq = inet_rsk(req); struct ip_options *opt = inet_rsk(req)->opt; struct flowi fl = { .oif = sk->sk_bound_dev_if, .mark = sk->sk_mark, .nl_u = { .ip4_u = { .daddr = ((opt && opt->srr) ? 
opt->faddr : ireq->rmt_addr), .saddr = ireq->loc_addr, .tos = RT_CONN_FLAGS(sk) } }, .proto = sk->sk_protocol, .flags = inet_sk_flowi_flags(sk), .uli_u = { .ports = { .sport = inet_sk(sk)->inet_sport, .dport = ireq->rmt_port } } }; struct net *net = sock_net(sk); security_req_classify_flow(req, &fl); if (ip_route_output_flow(net, &rt, &fl, sk, 0)) goto no_route; if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) goto route_err; return &rt->u.dst; route_err: ip_rt_put(rt); no_route: IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } EXPORT_SYMBOL_GPL(inet_csk_route_req); static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport, const u32 rnd, const u32 synq_hsize) { return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1); } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #define AF_INET_FAMILY(fam) ((fam) == AF_INET) #else #define AF_INET_FAMILY(fam) 1 #endif struct request_sock *inet_csk_search_req(const struct sock *sk, struct request_sock ***prevp, const __be16 rport, const __be32 raddr, const __be32 laddr) { const struct inet_connection_sock *icsk = inet_csk(sk); struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; struct request_sock *req, **prev; for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd, lopt->nr_table_entries)]; (req = *prev) != NULL; prev = &req->dl_next) { const struct inet_request_sock *ireq = inet_rsk(req); if (ireq->rmt_port == rport && ireq->rmt_addr == raddr && ireq->loc_addr == laddr && AF_INET_FAMILY(req->rsk_ops->family)) { WARN_ON(req->sk); *prevp = prev; break; } } return req; } EXPORT_SYMBOL_GPL(inet_csk_search_req); void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, unsigned long timeout) { struct inet_connection_sock *icsk = inet_csk(sk); struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd, lopt->nr_table_entries); reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout); inet_csk_reqsk_queue_added(sk, timeout); } /* Only thing we need from tcp.h */ extern int sysctl_tcp_synack_retries; EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add); /* Decide when to expire the request and when to resend SYN-ACK */ static inline void syn_ack_recalc(struct request_sock *req, const int thresh, const int max_retries, const u8 rskq_defer_accept, int *expire, int *resend) { if (!rskq_defer_accept) { *expire = req->retrans >= thresh; *resend = 1; return; } *expire = req->retrans >= thresh && (!inet_rsk(req)->acked || req->retrans >= max_retries); /* * Do not resend while waiting for data after ACK, * start to resend on end of deferring period to give * last chance for data or ACK to create established socket. */ *resend = !inet_rsk(req)->acked || req->retrans >= rskq_defer_accept - 1; } void inet_csk_reqsk_queue_prune(struct sock *parent, const unsigned long interval, const unsigned long timeout, const unsigned long max_rto) { struct inet_connection_sock *icsk = inet_csk(parent); struct request_sock_queue *queue = &icsk->icsk_accept_queue; struct listen_sock *lopt = queue->listen_opt; int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; int thresh = max_retries; unsigned long now = jiffies; struct request_sock **reqp, *req; int i, budget; if (lopt == NULL || lopt->qlen == 0) return; /* Normally all the openreqs are young and become mature * (i.e. converted to established socket) for first timeout. 
* If synack was not acknowledged for 3 seconds, it means * one of the following things: synack was lost, ack was lost, * rtt is high or nobody planned to ack (i.e. synflood). * When server is a bit loaded, queue is populated with old * open requests, reducing effective size of queue. * When server is well loaded, queue size reduces to zero * after several minutes of work. It is not synflood, * it is normal operation. The solution is pruning * too old entries overriding normal timeout, when * situation becomes dangerous. * * Essentially, we reserve half of room for young * embrions; and abort old ones without pity, if old * ones are about to clog our table. */ if (lopt->qlen>>(lopt->max_qlen_log-1)) { int young = (lopt->qlen_young<<1); while (thresh > 2) { if (lopt->qlen < young) break; thresh--; young <<= 1; } } if (queue->rskq_defer_accept) max_retries = queue->rskq_defer_accept; budget = 2 * (lopt->nr_table_entries / (timeout / interval)); i = lopt->clock_hand; do { reqp=&lopt->syn_table[i]; while ((req = *reqp) != NULL) { if (time_after_eq(now, req->expires)) { int expire = 0, resend = 0; syn_ack_recalc(req, thresh, max_retries, queue->rskq_defer_accept, &expire, &resend); if (req->rsk_ops->syn_ack_timeout) req->rsk_ops->syn_ack_timeout(parent, req); if (!expire && (!resend || !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || inet_rsk(req)->acked)) { unsigned long timeo; if (req->retrans++ == 0) lopt->qlen_young--; timeo = min((timeout << req->retrans), max_rto); req->expires = now + timeo; reqp = &req->dl_next; continue; } /* Drop this request */ inet_csk_reqsk_queue_unlink(parent, req, reqp); reqsk_queue_removed(queue, req); reqsk_free(req); continue; } reqp = &req->dl_next; } i = (i + 1) & (lopt->nr_table_entries - 1); } while (--budget > 0); lopt->clock_hand = i; if (lopt->qlen) inet_csk_reset_keepalive_timer(parent, interval); } EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, const gfp_t priority) { struct sock *newsk = sk_clone(sk, priority); if (newsk != NULL) { struct inet_connection_sock *newicsk = inet_csk(newsk); newsk->sk_state = TCP_SYN_RECV; newicsk->icsk_bind_hash = NULL; inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port; inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port); inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port; newsk->sk_write_space = sk_stream_write_space; newicsk->icsk_retransmits = 0; newicsk->icsk_backoff = 0; newicsk->icsk_probes_out = 0; /* Deinitialize accept_queue to trap illegal accesses. */ memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue)); security_inet_csk_clone(newsk, req); } return newsk; } EXPORT_SYMBOL_GPL(inet_csk_clone); /* * At this point, there should be no process reference to this * socket, and thus no user references at all. Therefore we * can assume the socket waitqueue is inactive and nobody will * try to jump onto it. */ void inet_csk_destroy_sock(struct sock *sk) { WARN_ON(sk->sk_state != TCP_CLOSE); WARN_ON(!sock_flag(sk, SOCK_DEAD)); /* It cannot be in hash table! 
*/ WARN_ON(!sk_unhashed(sk)); /* If it has not 0 inet_sk(sk)->inet_num, it must be bound */ WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash); sk->sk_prot->destroy(sk); sk_stream_kill_queues(sk); xfrm_sk_free_policy(sk); sk_refcnt_debug_release(sk); percpu_counter_dec(sk->sk_prot->orphan_count); sock_put(sk); } EXPORT_SYMBOL(inet_csk_destroy_sock); int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) { struct inet_sock *inet = inet_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries); if (rc != 0) return rc; sk->sk_max_ack_backlog = 0; sk->sk_ack_backlog = 0; inet_csk_delack_init(sk); /* There is race window here: we announce ourselves listening, * but this transition is still not validated by get_port(). * It is OK, because this socket enters to hash table only * after validation is complete. */ sk->sk_state = TCP_LISTEN; if (!sk->sk_prot->get_port(sk, inet->inet_num)) { inet->inet_sport = htons(inet->inet_num); sk_dst_reset(sk); sk->sk_prot->hash(sk); return 0; } sk->sk_state = TCP_CLOSE; __reqsk_queue_destroy(&icsk->icsk_accept_queue); return -EADDRINUSE; } EXPORT_SYMBOL_GPL(inet_csk_listen_start); /* * This routine closes sockets which have been at least partially * opened, but not yet accepted. */ void inet_csk_listen_stop(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct request_sock *acc_req; struct request_sock *req; inet_csk_delete_keepalive_timer(sk); /* make all the listen_opt local to us */ acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue); /* Following specs, it would be better either to send FIN * (and enter FIN-WAIT-1, it is normal close) * or to send active reset (abort). * Certainly, it is pretty dangerous while synflood, but it is * bad justification for our negligence 8) * To be honest, we are not able to make either * of the variants now. 
--ANK */ reqsk_queue_destroy(&icsk->icsk_accept_queue); while ((req = acc_req) != NULL) { struct sock *child = req->sk; acc_req = req->dl_next; local_bh_disable(); bh_lock_sock(child); WARN_ON(sock_owned_by_user(child)); sock_hold(child); sk->sk_prot->disconnect(child, O_NONBLOCK); sock_orphan(child); percpu_counter_inc(sk->sk_prot->orphan_count); inet_csk_destroy_sock(child); bh_unlock_sock(child); local_bh_enable(); sock_put(child); sk_acceptq_removed(sk); __reqsk_free(req); } WARN_ON(sk->sk_ack_backlog); } EXPORT_SYMBOL_GPL(inet_csk_listen_stop); void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) { struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; const struct inet_sock *inet = inet_sk(sk); sin->sin_family = AF_INET; sin->sin_addr.s_addr = inet->inet_daddr; sin->sin_port = inet->inet_dport; } EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); #ifdef CONFIG_COMPAT int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_af_ops->compat_getsockopt != NULL) return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname, optval, optlen); return icsk->icsk_af_ops->getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt); int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_af_ops->compat_setsockopt != NULL) return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname, optval, optlen); return icsk->icsk_af_ops->setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt); #endif
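For reference, inet_csk_reqsk_queue_prune() above rearms each unanswered SYN-ACK with a doubling timeout, timeo = min(timeout << retrans, max_rto). The short userspace sketch below just prints that schedule; it assumes the 3-second initial timeout mentioned in the pruning comment and a 120-second cap (TCP_RTO_MAX), and is illustrative only, not part of the kernel sources.

/*
 * Illustrative only -- not part of the kernel sources.
 * Prints the doubling rearm interval used by the prune logic above:
 *     timeo = min(timeout << retrans, max_rto)
 * assuming timeout = 3 s (the initial SYN-ACK timeout mentioned in the
 * comment) and max_rto = 120 s (TCP_RTO_MAX).
 */
#include <stdio.h>

int main(void)
{
	const unsigned int timeout = 3;   /* initial SYN-ACK timeout, seconds */
	const unsigned int max_rto = 120; /* retransmission cap, seconds */
	unsigned int retrans;

	for (retrans = 0; retrans <= 7; retrans++) {
		unsigned int timeo = timeout << retrans;

		if (timeo > max_rto)
			timeo = max_rto;
		printf("retrans=%u -> rearm in %u s\n", retrans, timeo);
	}
	return 0;
}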
gpl-2.0
santod/nuk3rn3l_htc_msm8960-revamped
drivers/rtc/rtc-isl1208.c
1527
17852
/* * Intersil ISL1208 rtc class driver * * Copyright 2005,2006 Hebert Valerio Riedel <hvr@gnu.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/bcd.h> #include <linux/rtc.h> #define DRV_VERSION "0.3" /* Register map */ /* rtc section */ #define ISL1208_REG_SC 0x00 #define ISL1208_REG_MN 0x01 #define ISL1208_REG_HR 0x02 #define ISL1208_REG_HR_MIL (1<<7) /* 24h/12h mode */ #define ISL1208_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */ #define ISL1208_REG_DT 0x03 #define ISL1208_REG_MO 0x04 #define ISL1208_REG_YR 0x05 #define ISL1208_REG_DW 0x06 #define ISL1208_RTC_SECTION_LEN 7 /* control/status section */ #define ISL1208_REG_SR 0x07 #define ISL1208_REG_SR_ARST (1<<7) /* auto reset */ #define ISL1208_REG_SR_XTOSCB (1<<6) /* crystal oscillator */ #define ISL1208_REG_SR_WRTC (1<<4) /* write rtc */ #define ISL1208_REG_SR_ALM (1<<2) /* alarm */ #define ISL1208_REG_SR_BAT (1<<1) /* battery */ #define ISL1208_REG_SR_RTCF (1<<0) /* rtc fail */ #define ISL1208_REG_INT 0x08 #define ISL1208_REG_INT_ALME (1<<6) /* alarm enable */ #define ISL1208_REG_INT_IM (1<<7) /* interrupt/alarm mode */ #define ISL1208_REG_09 0x09 /* reserved */ #define ISL1208_REG_ATR 0x0a #define ISL1208_REG_DTR 0x0b /* alarm section */ #define ISL1208_REG_SCA 0x0c #define ISL1208_REG_MNA 0x0d #define ISL1208_REG_HRA 0x0e #define ISL1208_REG_DTA 0x0f #define ISL1208_REG_MOA 0x10 #define ISL1208_REG_DWA 0x11 #define ISL1208_ALARM_SECTION_LEN 6 /* user section */ #define ISL1208_REG_USR1 0x12 #define ISL1208_REG_USR2 0x13 #define ISL1208_USR_SECTION_LEN 2 static struct i2c_driver isl1208_driver; /* block read */ static int isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[], unsigned len) { u8 reg_addr[1] = { reg }; struct i2c_msg msgs[2] = { {client->addr, 0, sizeof(reg_addr), reg_addr} , {client->addr, I2C_M_RD, len, buf} }; int ret; BUG_ON(reg > ISL1208_REG_USR2); BUG_ON(reg + len > ISL1208_REG_USR2 + 1); ret = i2c_transfer(client->adapter, msgs, 2); if (ret > 0) ret = 0; return ret; } /* block write */ static int isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[], unsigned len) { u8 i2c_buf[ISL1208_REG_USR2 + 2]; struct i2c_msg msgs[1] = { {client->addr, 0, len + 1, i2c_buf} }; int ret; BUG_ON(reg > ISL1208_REG_USR2); BUG_ON(reg + len > ISL1208_REG_USR2 + 1); i2c_buf[0] = reg; memcpy(&i2c_buf[1], &buf[0], len); ret = i2c_transfer(client->adapter, msgs, 1); if (ret > 0) ret = 0; return ret; } /* simple check to see wether we have a isl1208 */ static int isl1208_i2c_validate_client(struct i2c_client *client) { u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; u8 zero_mask[ISL1208_RTC_SECTION_LEN] = { 0x80, 0x80, 0x40, 0xc0, 0xe0, 0x00, 0xf8 }; int i; int ret; ret = isl1208_i2c_read_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN); if (ret < 0) return ret; for (i = 0; i < ISL1208_RTC_SECTION_LEN; ++i) { if (regs[i] & zero_mask[i]) /* check if bits are cleared */ return -ENODEV; } return 0; } static int isl1208_i2c_get_sr(struct i2c_client *client) { int sr = i2c_smbus_read_byte_data(client, ISL1208_REG_SR); if (sr < 0) return -EIO; return sr; } static int isl1208_i2c_get_atr(struct i2c_client *client) { int atr = i2c_smbus_read_byte_data(client, ISL1208_REG_ATR); if (atr < 0) return atr; /* The 6bit value in the ATR register controls the 
load * capacitance C_load * in steps of 0.25pF * * bit (1<<5) of the ATR register is inverted * * C_load(ATR=0x20) = 4.50pF * C_load(ATR=0x00) = 12.50pF * C_load(ATR=0x1f) = 20.25pF * */ atr &= 0x3f; /* mask out lsb */ atr ^= 1 << 5; /* invert 6th bit */ atr += 2 * 9; /* add offset of 4.5pF; unit[atr] = 0.25pF */ return atr; } static int isl1208_i2c_get_dtr(struct i2c_client *client) { int dtr = i2c_smbus_read_byte_data(client, ISL1208_REG_DTR); if (dtr < 0) return -EIO; /* dtr encodes adjustments of {-60,-40,-20,0,20,40,60} ppm */ dtr = ((dtr & 0x3) * 20) * (dtr & (1 << 2) ? -1 : 1); return dtr; } static int isl1208_i2c_get_usr(struct i2c_client *client) { u8 buf[ISL1208_USR_SECTION_LEN] = { 0, }; int ret; ret = isl1208_i2c_read_regs(client, ISL1208_REG_USR1, buf, ISL1208_USR_SECTION_LEN); if (ret < 0) return ret; return (buf[1] << 8) | buf[0]; } static int isl1208_i2c_set_usr(struct i2c_client *client, u16 usr) { u8 buf[ISL1208_USR_SECTION_LEN]; buf[0] = usr & 0xff; buf[1] = (usr >> 8) & 0xff; return isl1208_i2c_set_regs(client, ISL1208_REG_USR1, buf, ISL1208_USR_SECTION_LEN); } static int isl1208_rtc_toggle_alarm(struct i2c_client *client, int enable) { int icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT); if (icr < 0) { dev_err(&client->dev, "%s: reading INT failed\n", __func__); return icr; } if (enable) icr |= ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM; else icr &= ~(ISL1208_REG_INT_ALME | ISL1208_REG_INT_IM); icr = i2c_smbus_write_byte_data(client, ISL1208_REG_INT, icr); if (icr < 0) { dev_err(&client->dev, "%s: writing INT failed\n", __func__); return icr; } return 0; } static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq) { struct i2c_client *const client = to_i2c_client(dev); int sr, dtr, atr, usr; sr = isl1208_i2c_get_sr(client); if (sr < 0) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return sr; } seq_printf(seq, "status_reg\t:%s%s%s%s%s%s (0x%.2x)\n", (sr & ISL1208_REG_SR_RTCF) ? " RTCF" : "", (sr & ISL1208_REG_SR_BAT) ? " BAT" : "", (sr & ISL1208_REG_SR_ALM) ? " ALM" : "", (sr & ISL1208_REG_SR_WRTC) ? " WRTC" : "", (sr & ISL1208_REG_SR_XTOSCB) ? " XTOSCB" : "", (sr & ISL1208_REG_SR_ARST) ? " ARST" : "", sr); seq_printf(seq, "batt_status\t: %s\n", (sr & ISL1208_REG_SR_RTCF) ? 
"bad" : "okay"); dtr = isl1208_i2c_get_dtr(client); if (dtr >= 0 - 1) seq_printf(seq, "digital_trim\t: %d ppm\n", dtr); atr = isl1208_i2c_get_atr(client); if (atr >= 0) seq_printf(seq, "analog_trim\t: %d.%.2d pF\n", atr >> 2, (atr & 0x3) * 25); usr = isl1208_i2c_get_usr(client); if (usr >= 0) seq_printf(seq, "user_data\t: 0x%.4x\n", usr); return 0; } static int isl1208_i2c_read_time(struct i2c_client *client, struct rtc_time *tm) { int sr; u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; sr = isl1208_i2c_get_sr(client); if (sr < 0) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return -EIO; } sr = isl1208_i2c_read_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN); if (sr < 0) { dev_err(&client->dev, "%s: reading RTC section failed\n", __func__); return sr; } tm->tm_sec = bcd2bin(regs[ISL1208_REG_SC]); tm->tm_min = bcd2bin(regs[ISL1208_REG_MN]); /* HR field has a more complex interpretation */ { const u8 _hr = regs[ISL1208_REG_HR]; if (_hr & ISL1208_REG_HR_MIL) /* 24h format */ tm->tm_hour = bcd2bin(_hr & 0x3f); else { /* 12h format */ tm->tm_hour = bcd2bin(_hr & 0x1f); if (_hr & ISL1208_REG_HR_PM) /* PM flag set */ tm->tm_hour += 12; } } tm->tm_mday = bcd2bin(regs[ISL1208_REG_DT]); tm->tm_mon = bcd2bin(regs[ISL1208_REG_MO]) - 1; /* rtc starts at 1 */ tm->tm_year = bcd2bin(regs[ISL1208_REG_YR]) + 100; tm->tm_wday = bcd2bin(regs[ISL1208_REG_DW]); return 0; } static int isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm) { struct rtc_time *const tm = &alarm->time; u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, }; int icr, yr, sr = isl1208_i2c_get_sr(client); if (sr < 0) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return sr; } sr = isl1208_i2c_read_regs(client, ISL1208_REG_SCA, regs, ISL1208_ALARM_SECTION_LEN); if (sr < 0) { dev_err(&client->dev, "%s: reading alarm section failed\n", __func__); return sr; } /* MSB of each alarm register is an enable bit */ tm->tm_sec = bcd2bin(regs[ISL1208_REG_SCA - ISL1208_REG_SCA] & 0x7f); tm->tm_min = bcd2bin(regs[ISL1208_REG_MNA - ISL1208_REG_SCA] & 0x7f); tm->tm_hour = bcd2bin(regs[ISL1208_REG_HRA - ISL1208_REG_SCA] & 0x3f); tm->tm_mday = bcd2bin(regs[ISL1208_REG_DTA - ISL1208_REG_SCA] & 0x3f); tm->tm_mon = bcd2bin(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1; tm->tm_wday = bcd2bin(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03); /* The alarm doesn't store the year so get it from the rtc section */ yr = i2c_smbus_read_byte_data(client, ISL1208_REG_YR); if (yr < 0) { dev_err(&client->dev, "%s: reading RTC YR failed\n", __func__); return yr; } tm->tm_year = bcd2bin(yr) + 100; icr = i2c_smbus_read_byte_data(client, ISL1208_REG_INT); if (icr < 0) { dev_err(&client->dev, "%s: reading INT failed\n", __func__); return icr; } alarm->enabled = !!(icr & ISL1208_REG_INT_ALME); return 0; } static int isl1208_i2c_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm) { struct rtc_time *alarm_tm = &alarm->time; u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, }; const int offs = ISL1208_REG_SCA; unsigned long rtc_secs, alarm_secs; struct rtc_time rtc_tm; int err, enable; err = isl1208_i2c_read_time(client, &rtc_tm); if (err) return err; err = rtc_tm_to_time(&rtc_tm, &rtc_secs); if (err) return err; err = rtc_tm_to_time(alarm_tm, &alarm_secs); if (err) return err; /* If the alarm time is before the current time disable the alarm */ if (!alarm->enabled || alarm_secs <= rtc_secs) enable = 0x00; else enable = 0x80; /* Program the alarm and enable it for each setting */ regs[ISL1208_REG_SCA - offs] = 
bin2bcd(alarm_tm->tm_sec) | enable; regs[ISL1208_REG_MNA - offs] = bin2bcd(alarm_tm->tm_min) | enable; regs[ISL1208_REG_HRA - offs] = bin2bcd(alarm_tm->tm_hour) | ISL1208_REG_HR_MIL | enable; regs[ISL1208_REG_DTA - offs] = bin2bcd(alarm_tm->tm_mday) | enable; regs[ISL1208_REG_MOA - offs] = bin2bcd(alarm_tm->tm_mon + 1) | enable; regs[ISL1208_REG_DWA - offs] = bin2bcd(alarm_tm->tm_wday & 7) | enable; /* write ALARM registers */ err = isl1208_i2c_set_regs(client, offs, regs, ISL1208_ALARM_SECTION_LEN); if (err < 0) { dev_err(&client->dev, "%s: writing ALARM section failed\n", __func__); return err; } err = isl1208_rtc_toggle_alarm(client, enable); if (err) return err; return 0; } static int isl1208_rtc_read_time(struct device *dev, struct rtc_time *tm) { return isl1208_i2c_read_time(to_i2c_client(dev), tm); } static int isl1208_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm) { int sr; u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, }; /* The clock has an 8 bit wide bcd-coded register (they never learn) * for the year. tm_year is an offset from 1900 and we are interested * in the 2000-2099 range, so any value less than 100 is invalid. */ if (tm->tm_year < 100) return -EINVAL; regs[ISL1208_REG_SC] = bin2bcd(tm->tm_sec); regs[ISL1208_REG_MN] = bin2bcd(tm->tm_min); regs[ISL1208_REG_HR] = bin2bcd(tm->tm_hour) | ISL1208_REG_HR_MIL; regs[ISL1208_REG_DT] = bin2bcd(tm->tm_mday); regs[ISL1208_REG_MO] = bin2bcd(tm->tm_mon + 1); regs[ISL1208_REG_YR] = bin2bcd(tm->tm_year - 100); regs[ISL1208_REG_DW] = bin2bcd(tm->tm_wday & 7); sr = isl1208_i2c_get_sr(client); if (sr < 0) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return sr; } /* set WRTC */ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr | ISL1208_REG_SR_WRTC); if (sr < 0) { dev_err(&client->dev, "%s: writing SR failed\n", __func__); return sr; } /* write RTC registers */ sr = isl1208_i2c_set_regs(client, 0, regs, ISL1208_RTC_SECTION_LEN); if (sr < 0) { dev_err(&client->dev, "%s: writing RTC section failed\n", __func__); return sr; } /* clear WRTC again */ sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr & ~ISL1208_REG_SR_WRTC); if (sr < 0) { dev_err(&client->dev, "%s: writing SR failed\n", __func__); return sr; } return 0; } static int isl1208_rtc_set_time(struct device *dev, struct rtc_time *tm) { return isl1208_i2c_set_time(to_i2c_client(dev), tm); } static int isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm); } static int isl1208_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { return isl1208_i2c_set_alarm(to_i2c_client(dev), alarm); } static irqreturn_t isl1208_rtc_interrupt(int irq, void *data) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); struct i2c_client *client = data; struct rtc_device *rtc = i2c_get_clientdata(client); int handled = 0, sr, err; /* * I2C reads get NAK'ed if we read straight away after an interrupt? * Using a mdelay/msleep didn't seem to help either, so we work around * this by continually trying to read the register for a short time. 
*/ while (1) { sr = isl1208_i2c_get_sr(client); if (sr >= 0) break; if (time_after(jiffies, timeout)) { dev_err(&client->dev, "%s: reading SR failed\n", __func__); return sr; } } if (sr & ISL1208_REG_SR_ALM) { dev_dbg(&client->dev, "alarm!\n"); rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); /* Clear the alarm */ sr &= ~ISL1208_REG_SR_ALM; sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr); if (sr < 0) dev_err(&client->dev, "%s: writing SR failed\n", __func__); else handled = 1; /* Disable the alarm */ err = isl1208_rtc_toggle_alarm(client, 0); if (err) return err; } return handled ? IRQ_HANDLED : IRQ_NONE; } static const struct rtc_class_ops isl1208_rtc_ops = { .proc = isl1208_rtc_proc, .read_time = isl1208_rtc_read_time, .set_time = isl1208_rtc_set_time, .read_alarm = isl1208_rtc_read_alarm, .set_alarm = isl1208_rtc_set_alarm, }; /* sysfs interface */ static ssize_t isl1208_sysfs_show_atrim(struct device *dev, struct device_attribute *attr, char *buf) { int atr = isl1208_i2c_get_atr(to_i2c_client(dev)); if (atr < 0) return atr; return sprintf(buf, "%d.%.2d pF\n", atr >> 2, (atr & 0x3) * 25); } static DEVICE_ATTR(atrim, S_IRUGO, isl1208_sysfs_show_atrim, NULL); static ssize_t isl1208_sysfs_show_dtrim(struct device *dev, struct device_attribute *attr, char *buf) { int dtr = isl1208_i2c_get_dtr(to_i2c_client(dev)); if (dtr < 0) return dtr; return sprintf(buf, "%d ppm\n", dtr); } static DEVICE_ATTR(dtrim, S_IRUGO, isl1208_sysfs_show_dtrim, NULL); static ssize_t isl1208_sysfs_show_usr(struct device *dev, struct device_attribute *attr, char *buf) { int usr = isl1208_i2c_get_usr(to_i2c_client(dev)); if (usr < 0) return usr; return sprintf(buf, "0x%.4x\n", usr); } static ssize_t isl1208_sysfs_store_usr(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int usr = -1; if (buf[0] == '0' && (buf[1] == 'x' || buf[1] == 'X')) { if (sscanf(buf, "%x", &usr) != 1) return -EINVAL; } else { if (sscanf(buf, "%d", &usr) != 1) return -EINVAL; } if (usr < 0 || usr > 0xffff) return -EINVAL; return isl1208_i2c_set_usr(to_i2c_client(dev), usr) ? 
-EIO : count; } static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr, isl1208_sysfs_store_usr); static struct attribute *isl1208_rtc_attrs[] = { &dev_attr_atrim.attr, &dev_attr_dtrim.attr, &dev_attr_usr.attr, NULL }; static const struct attribute_group isl1208_rtc_sysfs_files = { .attrs = isl1208_rtc_attrs, }; static int isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; struct rtc_device *rtc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; if (isl1208_i2c_validate_client(client) < 0) return -ENODEV; dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); if (client->irq > 0) { rc = request_threaded_irq(client->irq, NULL, isl1208_rtc_interrupt, IRQF_SHARED, isl1208_driver.driver.name, client); if (!rc) { device_init_wakeup(&client->dev, 1); enable_irq_wake(client->irq); } else { dev_err(&client->dev, "Unable to request irq %d, no alarm support\n", client->irq); client->irq = 0; } } rtc = rtc_device_register(isl1208_driver.driver.name, &client->dev, &isl1208_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { rc = PTR_ERR(rtc); goto exit_free_irq; } i2c_set_clientdata(client, rtc); rc = isl1208_i2c_get_sr(client); if (rc < 0) { dev_err(&client->dev, "reading status failed\n"); goto exit_unregister; } if (rc & ISL1208_REG_SR_RTCF) dev_warn(&client->dev, "rtc power failure detected, " "please set clock.\n"); rc = sysfs_create_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); if (rc) goto exit_unregister; return 0; exit_unregister: rtc_device_unregister(rtc); exit_free_irq: if (client->irq) free_irq(client->irq, client); return rc; } static int isl1208_remove(struct i2c_client *client) { struct rtc_device *rtc = i2c_get_clientdata(client); sysfs_remove_group(&client->dev.kobj, &isl1208_rtc_sysfs_files); rtc_device_unregister(rtc); if (client->irq) free_irq(client->irq, client); return 0; } static const struct i2c_device_id isl1208_id[] = { { "isl1208", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, isl1208_id); static struct i2c_driver isl1208_driver = { .driver = { .name = "rtc-isl1208", }, .probe = isl1208_probe, .remove = isl1208_remove, .id_table = isl1208_id, }; module_i2c_driver(isl1208_driver); MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>"); MODULE_DESCRIPTION("Intersil ISL1208 RTC driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
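Once isl1208_probe() registers isl1208_rtc_ops with the RTC class, the chip is reachable through the generic RTC character device. The following minimal userspace sketch reads the time through that path; it assumes the driver bound as /dev/rtc0 (the node name depends on registration order) and is illustrative only, not part of the driver.

/*
 * Illustrative only -- not part of the driver.
 * Reads the current time from the RTC class device backed by rtc-isl1208,
 * assuming it registered as /dev/rtc0.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
		perror("RTC_RD_TIME");
		close(fd);
		return 1;
	}
	/* rtc_time uses a 1900-based year and a 0-based month */
	printf("rtc time: %04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}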
gpl-2.0
AzraelsKiss/android_kernel_samsung_smdk4412
drivers/staging/pohmelfs/inode.c
2295
50120
/* * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/crypto.h> #include <linux/fs.h> #include <linux/jhash.h> #include <linux/hash.h> #include <linux/ktime.h> #include <linux/mm.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/parser.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/statfs.h> #include <linux/writeback.h> #include <linux/prefetch.h> #include "netfs.h" #define POHMELFS_MAGIC_NUM 0x504f482e static struct kmem_cache *pohmelfs_inode_cache; static atomic_t psb_bdi_num = ATOMIC_INIT(0); /* * Removes inode from all trees, drops local name cache and removes all queued * requests for object removal. */ void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi) { mutex_lock(&pi->offset_lock); pohmelfs_free_names(pi); mutex_unlock(&pi->offset_lock); dprintk("%s: deleted stuff in ino: %llu.\n", __func__, pi->ino); } /* * Sync inode to server. * Returns zero in success and negative error value otherwise. * It will gather path to root directory into structures containing * creation mode, permissions and names, so that the whole path * to given inode could be created using only single network command. 
*/ int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans) { struct pohmelfs_inode *pi = POHMELFS_I(inode); int err = -ENOMEM, size; struct netfs_cmd *cmd; void *data; int cur_len = netfs_trans_cur_len(trans); if (unlikely(cur_len < 0)) return -ETOOSMALL; cmd = netfs_trans_current(trans); cur_len -= sizeof(struct netfs_cmd); data = (void *)(cmd + 1); err = pohmelfs_construct_path_string(pi, data, cur_len); if (err < 0) goto err_out_exit; size = err; cmd->start = i_size_read(inode); cmd->cmd = NETFS_CREATE; cmd->size = size; cmd->id = pi->ino; cmd->ext = inode->i_mode; netfs_convert_cmd(cmd); netfs_trans_update(cmd, trans, size); return 0; err_out_exit: printk("%s: completed ino: %llu, err: %d.\n", __func__, pi->ino, err); return err; } static int pohmelfs_write_trans_complete(struct page **pages, unsigned int page_num, void *private, int err) { unsigned i; dprintk("%s: pages: %lu-%lu, page_num: %u, err: %d.\n", __func__, pages[0]->index, pages[page_num-1]->index, page_num, err); for (i = 0; i < page_num; i++) { struct page *page = pages[i]; if (!page) continue; end_page_writeback(page); if (err < 0) { SetPageError(page); set_page_dirty(page); } unlock_page(page); page_cache_release(page); /* dprintk("%s: %3u/%u: page: %p.\n", __func__, i, page_num, page); */ } return err; } static int pohmelfs_inode_has_dirty_pages(struct address_space *mapping, pgoff_t index) { int ret; struct page *page; rcu_read_lock(); ret = radix_tree_gang_lookup_tag(&mapping->page_tree, (void **)&page, index, 1, PAGECACHE_TAG_DIRTY); rcu_read_unlock(); return ret; } static int pohmelfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; struct pohmelfs_inode *pi = POHMELFS_I(inode); struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); int err = 0; int done = 0; int nr_pages; pgoff_t index; pgoff_t end; /* Inclusive */ int scanned = 0; int range_whole = 0; if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { index = wbc->range_start >> PAGE_CACHE_SHIFT; end = wbc->range_end >> PAGE_CACHE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; scanned = 1; } retry: while (!done && (index <= end)) { unsigned int i = min(end - index, (pgoff_t)psb->trans_max_pages); int path_len; struct netfs_trans *trans; err = pohmelfs_inode_has_dirty_pages(mapping, index); if (!err) break; err = pohmelfs_path_length(pi); if (err < 0) break; path_len = err; if (path_len <= 2) { err = -ENOENT; break; } trans = netfs_trans_alloc(psb, path_len, 0, i); if (!trans) { err = -ENOMEM; break; } trans->complete = &pohmelfs_write_trans_complete; trans->page_num = nr_pages = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY, trans->page_num, trans->pages); dprintk("%s: t: %p, nr_pages: %u, end: %lu, index: %lu, max: %u.\n", __func__, trans, nr_pages, end, index, trans->page_num); if (!nr_pages) goto err_out_reset; err = pohmelfs_write_inode_create(inode, trans); if (err) goto err_out_reset; err = 0; scanned = 1; for (i = 0; i < trans->page_num; i++) { struct page *page = trans->pages[i]; lock_page(page); if (unlikely(page->mapping != mapping)) goto out_continue; if (!wbc->range_cyclic && page->index > end) { done = 1; goto out_continue; } if (wbc->sync_mode != WB_SYNC_NONE) wait_on_page_writeback(page); if (PageWriteback(page) || !clear_page_dirty_for_io(page)) { dprintk("%s: not clear for io page: %p, writeback: %d.\n", __func__, page, PageWriteback(page)); goto out_continue; } 
set_page_writeback(page); trans->attached_size += page_private(page); trans->attached_pages++; #if 0 dprintk("%s: %u/%u added trans: %p, gen: %u, page: %p, [High: %d], size: %lu, idx: %lu.\n", __func__, i, trans->page_num, trans, trans->gen, page, !!PageHighMem(page), page_private(page), page->index); #endif wbc->nr_to_write--; if (wbc->nr_to_write <= 0) done = 1; continue; out_continue: unlock_page(page); trans->pages[i] = NULL; } err = netfs_trans_finish(trans, psb); if (err) break; continue; err_out_reset: trans->result = err; netfs_trans_reset(trans); netfs_trans_put(trans); break; } if (!scanned && !done) { /* * We hit the last page and there is more work to be done: wrap * back to the start of the file */ scanned = 1; index = 0; goto retry; } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; return err; } /* * Inode writeback creation completion callback. * Only invoked for just created inodes, which do not have pages attached, * like dirs and empty files. */ static int pohmelfs_write_inode_complete(struct page **pages, unsigned int page_num, void *private, int err) { struct inode *inode = private; struct pohmelfs_inode *pi = POHMELFS_I(inode); if (inode) { if (err) { mark_inode_dirty(inode); clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state); } else { set_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state); } pohmelfs_put_inode(pi); } return err; } int pohmelfs_write_create_inode(struct pohmelfs_inode *pi) { struct netfs_trans *t; struct inode *inode = &pi->vfs_inode; struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); int err; if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state)) return 0; dprintk("%s: started ino: %llu.\n", __func__, pi->ino); err = pohmelfs_path_length(pi); if (err < 0) goto err_out_exit; t = netfs_trans_alloc(psb, err + 1, 0, 0); if (!t) { err = -ENOMEM; goto err_out_exit; } t->complete = pohmelfs_write_inode_complete; t->private = igrab(inode); if (!t->private) { err = -ENOENT; goto err_out_put; } err = pohmelfs_write_inode_create(inode, t); if (err) goto err_out_put; netfs_trans_finish(t, POHMELFS_SB(inode->i_sb)); return 0; err_out_put: t->result = err; netfs_trans_put(t); err_out_exit: return err; } /* * Sync all not-yet-created children in given directory to the server. */ static int pohmelfs_write_inode_create_children(struct inode *inode) { struct pohmelfs_inode *parent = POHMELFS_I(inode); struct super_block *sb = inode->i_sb; struct pohmelfs_name *n; while (!list_empty(&parent->sync_create_list)) { n = NULL; mutex_lock(&parent->offset_lock); if (!list_empty(&parent->sync_create_list)) { n = list_first_entry(&parent->sync_create_list, struct pohmelfs_name, sync_create_entry); list_del_init(&n->sync_create_entry); } mutex_unlock(&parent->offset_lock); if (!n) break; inode = ilookup(sb, n->ino); dprintk("%s: parent: %llu, ino: %llu, inode: %p.\n", __func__, parent->ino, n->ino, inode); if (inode && (inode->i_state & I_DIRTY)) { struct pohmelfs_inode *pi = POHMELFS_I(inode); pohmelfs_write_create_inode(pi); /* pohmelfs_meta_command(pi, NETFS_INODE_INFO, 0, NULL, NULL, 0); */ iput(inode); } } return 0; } /* * Removes given child from given inode on server. */ int pohmelfs_remove_child(struct pohmelfs_inode *pi, struct pohmelfs_name *n) { return pohmelfs_meta_command_data(pi, pi->ino, NETFS_REMOVE, NULL, 0, NULL, NULL, 0); } /* * Writeback for given inode. 
*/ static int pohmelfs_write_inode(struct inode *inode, struct writeback_control *wbc) { struct pohmelfs_inode *pi = POHMELFS_I(inode); pohmelfs_write_create_inode(pi); pohmelfs_write_inode_create_children(inode); return 0; } /* * It is not exported, sorry... */ static inline wait_queue_head_t *page_waitqueue(struct page *page) { const struct zone *zone = page_zone(page); return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; } static int pohmelfs_wait_on_page_locked(struct page *page) { struct pohmelfs_sb *psb = POHMELFS_SB(page->mapping->host->i_sb); long ret = psb->wait_on_page_timeout; DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); int err = 0; if (!PageLocked(page)) return 0; for (;;) { prepare_to_wait(page_waitqueue(page), &wait.wait, TASK_INTERRUPTIBLE); dprintk("%s: page: %p, locked: %d, uptodate: %d, error: %d, flags: %lx.\n", __func__, page, PageLocked(page), PageUptodate(page), PageError(page), page->flags); if (!PageLocked(page)) break; if (!signal_pending(current)) { ret = schedule_timeout(ret); if (!ret) break; continue; } ret = -ERESTARTSYS; break; } finish_wait(page_waitqueue(page), &wait.wait); if (!ret) err = -ETIMEDOUT; if (!err) SetPageUptodate(page); if (err) printk("%s: page: %p, uptodate: %d, locked: %d, err: %d.\n", __func__, page, PageUptodate(page), PageLocked(page), err); return err; } static int pohmelfs_read_page_complete(struct page **pages, unsigned int page_num, void *private, int err) { struct page *page = private; if (PageChecked(page)) return err; if (err < 0) { dprintk("%s: page: %p, err: %d.\n", __func__, page, err); SetPageError(page); } unlock_page(page); return err; } /* * Read a page from remote server. * Function will wait until page is unlocked. */ static int pohmelfs_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); struct pohmelfs_inode *pi = POHMELFS_I(inode); struct netfs_trans *t; struct netfs_cmd *cmd; int err, path_len; void *data; u64 isize; err = pohmelfs_data_lock(pi, page->index << PAGE_CACHE_SHIFT, PAGE_SIZE, POHMELFS_READ_LOCK); if (err) goto err_out_exit; isize = i_size_read(inode); if (isize <= page->index << PAGE_CACHE_SHIFT) { SetPageUptodate(page); unlock_page(page); return 0; } path_len = pohmelfs_path_length(pi); if (path_len < 0) { err = path_len; goto err_out_exit; } t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0); if (!t) { err = -ENOMEM; goto err_out_exit; } t->complete = pohmelfs_read_page_complete; t->private = page; cmd = netfs_trans_current(t); data = (void *)(cmd + 1); err = pohmelfs_construct_path_string(pi, data, path_len); if (err < 0) goto err_out_free; path_len = err; cmd->id = pi->ino; cmd->start = page->index; cmd->start <<= PAGE_CACHE_SHIFT; cmd->size = PAGE_CACHE_SIZE + path_len; cmd->cmd = NETFS_READ_PAGE; cmd->ext = path_len; dprintk("%s: path: '%s', page: %p, ino: %llu, start: %llu, size: %lu.\n", __func__, (char *)data, page, pi->ino, cmd->start, PAGE_CACHE_SIZE); netfs_convert_cmd(cmd); netfs_trans_update(cmd, t, path_len); err = netfs_trans_finish(t, psb); if (err) goto err_out_return; return pohmelfs_wait_on_page_locked(page); err_out_free: t->result = err; netfs_trans_put(t); err_out_exit: SetPageError(page); if (PageLocked(page)) unlock_page(page); err_out_return: printk("%s: page: %p, start: %lu, size: %lu, err: %d.\n", __func__, page, page->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, err); return err; } /* * Write begin/end magic. 
* Allocates a page and writes inode if it was not synced to server before. */ static int pohmelfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; struct page *page; pgoff_t index; unsigned start, end; int err; *pagep = NULL; index = pos >> PAGE_CACHE_SHIFT; start = pos & (PAGE_CACHE_SIZE - 1); end = start + len; page = grab_cache_page(mapping, index); #if 0 dprintk("%s: page: %p pos: %llu, len: %u, index: %lu, start: %u, end: %u, uptodate: %d.\n", __func__, page, pos, len, index, start, end, PageUptodate(page)); #endif if (!page) { err = -ENOMEM; goto err_out_exit; } while (!PageUptodate(page)) { if (start && test_bit(NETFS_INODE_REMOTE_SYNCED, &POHMELFS_I(inode)->state)) { err = pohmelfs_readpage(file, page); if (err) goto err_out_exit; lock_page(page); continue; } if (len != PAGE_CACHE_SIZE) { void *kaddr = kmap_atomic(page, KM_USER0); memset(kaddr + start, 0, PAGE_CACHE_SIZE - start); flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); } SetPageUptodate(page); } set_page_private(page, end); *pagep = page; return 0; err_out_exit: page_cache_release(page); *pagep = NULL; return err; } static int pohmelfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; if (copied != len) { unsigned from = pos & (PAGE_CACHE_SIZE - 1); void *kaddr = kmap_atomic(page, KM_USER0); memset(kaddr + from + copied, 0, len - copied); flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); } SetPageUptodate(page); set_page_dirty(page); #if 0 dprintk("%s: page: %p [U: %d, D: %d, L: %d], pos: %llu, len: %u, copied: %u.\n", __func__, page, PageUptodate(page), PageDirty(page), PageLocked(page), pos, len, copied); #endif flush_dcache_page(page); unlock_page(page); page_cache_release(page); if (pos + copied > inode->i_size) { struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); psb->avail_size -= pos + copied - inode->i_size; i_size_write(inode, pos + copied); } return copied; } static int pohmelfs_readpages_trans_complete(struct page **__pages, unsigned int page_num, void *private, int err) { struct pohmelfs_inode *pi = private; unsigned int i, num; struct page **pages, *page = (struct page *)__pages; loff_t index = page->index; pages = kzalloc(sizeof(void *) * page_num, GFP_NOIO); if (!pages) return -ENOMEM; num = find_get_pages_contig(pi->vfs_inode.i_mapping, index, page_num, pages); if (num <= 0) { err = num; goto err_out_free; } for (i = 0; i < num; ++i) { page = pages[i]; if (err) printk("%s: %u/%u: page: %p, index: %lu, uptodate: %d, locked: %d, err: %d.\n", __func__, i, num, page, page->index, PageUptodate(page), PageLocked(page), err); if (!PageChecked(page)) { if (err < 0) SetPageError(page); unlock_page(page); } page_cache_release(page); page_cache_release(page); } err_out_free: kfree(pages); return err; } static int pohmelfs_send_readpages(struct pohmelfs_inode *pi, struct page *first, unsigned int num) { struct netfs_trans *t; struct netfs_cmd *cmd; struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb); int err, path_len; void *data; err = pohmelfs_data_lock(pi, first->index << PAGE_CACHE_SHIFT, num * PAGE_SIZE, POHMELFS_READ_LOCK); if (err) goto err_out_exit; path_len = pohmelfs_path_length(pi); if (path_len < 0) { err = path_len; goto err_out_exit; } t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0); if (!t) { err = -ENOMEM; goto 
err_out_exit; } cmd = netfs_trans_current(t); data = (void *)(cmd + 1); t->complete = pohmelfs_readpages_trans_complete; t->private = pi; t->page_num = num; t->pages = (struct page **)first; err = pohmelfs_construct_path_string(pi, data, path_len); if (err < 0) goto err_out_put; path_len = err; cmd->cmd = NETFS_READ_PAGES; cmd->start = first->index; cmd->start <<= PAGE_CACHE_SHIFT; cmd->size = (num << 8 | PAGE_CACHE_SHIFT); cmd->id = pi->ino; cmd->ext = path_len; dprintk("%s: t: %p, gen: %u, path: '%s', path_len: %u, " "start: %lu, num: %u.\n", __func__, t, t->gen, (char *)data, path_len, first->index, num); netfs_convert_cmd(cmd); netfs_trans_update(cmd, t, path_len); return netfs_trans_finish(t, psb); err_out_put: netfs_trans_free(t); err_out_exit: pohmelfs_readpages_trans_complete((struct page **)first, num, pi, err); return err; } #define list_to_page(head) (list_entry((head)->prev, struct page, lru)) static int pohmelfs_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { unsigned int page_idx, num = 0; struct page *page = NULL, *first = NULL; for (page_idx = 0; page_idx < nr_pages; page_idx++) { page = list_to_page(pages); prefetchw(&page->flags); list_del(&page->lru); if (!add_to_page_cache_lru(page, mapping, page->index, GFP_KERNEL)) { if (!num) { num = 1; first = page; continue; } dprintk("%s: added to lru page: %p, page_index: %lu, first_index: %lu.\n", __func__, page, page->index, first->index); if (unlikely(first->index + num != page->index) || (num > 500)) { pohmelfs_send_readpages(POHMELFS_I(mapping->host), first, num); first = page; num = 0; } num++; } } pohmelfs_send_readpages(POHMELFS_I(mapping->host), first, num); /* * This will be sync read, so when last page is processed, * all previous are alerady unlocked and ready to be used. */ return 0; } /* * Small address space operations for POHMELFS. */ const struct address_space_operations pohmelfs_aops = { .readpage = pohmelfs_readpage, .readpages = pohmelfs_readpages, .writepages = pohmelfs_writepages, .write_begin = pohmelfs_write_begin, .write_end = pohmelfs_write_end, .set_page_dirty = __set_page_dirty_nobuffers, }; static void pohmelfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); INIT_LIST_HEAD(&inode->i_dentry); kmem_cache_free(pohmelfs_inode_cache, POHMELFS_I(inode)); } /* * ->destroy_inode() callback. Deletes inode from the caches * and frees private data. */ static void pohmelfs_destroy_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct pohmelfs_sb *psb = POHMELFS_SB(sb); struct pohmelfs_inode *pi = POHMELFS_I(inode); /* pohmelfs_data_unlock(pi, 0, inode->i_size, POHMELFS_READ_LOCK); */ pohmelfs_inode_del_inode(psb, pi); dprintk("%s: pi: %p, inode: %p, ino: %llu.\n", __func__, pi, &pi->vfs_inode, pi->ino); atomic_long_dec(&psb->total_inodes); call_rcu(&inode->i_rcu, pohmelfs_i_callback); } /* * ->alloc_inode() callback. Allocates inode and initializes private data. 
*/ static struct inode *pohmelfs_alloc_inode(struct super_block *sb) { struct pohmelfs_inode *pi; pi = kmem_cache_alloc(pohmelfs_inode_cache, GFP_NOIO); if (!pi) return NULL; pi->hash_root = RB_ROOT; mutex_init(&pi->offset_lock); INIT_LIST_HEAD(&pi->sync_create_list); INIT_LIST_HEAD(&pi->inode_entry); pi->lock_type = 0; pi->state = 0; pi->total_len = 0; pi->drop_count = 0; dprintk("%s: pi: %p, inode: %p.\n", __func__, pi, &pi->vfs_inode); atomic_long_inc(&POHMELFS_SB(sb)->total_inodes); return &pi->vfs_inode; } /* * We want fsync() to work on POHMELFS. */ static int pohmelfs_fsync(struct file *file, int datasync) { struct inode *inode = file->f_mapping->host; return sync_inode_metadata(inode, 1); } ssize_t pohmelfs_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; struct pohmelfs_inode *pi = POHMELFS_I(inode); struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len }; struct kiocb kiocb; ssize_t ret; loff_t pos = *ppos; init_sync_kiocb(&kiocb, file); kiocb.ki_pos = pos; kiocb.ki_left = len; dprintk("%s: len: %zu, pos: %llu.\n", __func__, len, pos); mutex_lock(&inode->i_mutex); ret = pohmelfs_data_lock(pi, pos, len, POHMELFS_WRITE_LOCK); if (ret) goto err_out_unlock; ret = __generic_file_aio_write(&kiocb, &iov, 1, &kiocb.ki_pos); *ppos = kiocb.ki_pos; mutex_unlock(&inode->i_mutex); WARN_ON(ret < 0); if (ret > 0) { ssize_t err; err = generic_write_sync(file, pos, ret); if (err < 0) ret = err; WARN_ON(ret < 0); } return ret; err_out_unlock: mutex_unlock(&inode->i_mutex); return ret; } static const struct file_operations pohmelfs_file_ops = { .open = generic_file_open, .fsync = pohmelfs_fsync, .llseek = generic_file_llseek, .read = do_sync_read, .aio_read = generic_file_aio_read, .mmap = generic_file_mmap, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, .write = pohmelfs_write, .aio_write = generic_file_aio_write, }; const struct inode_operations pohmelfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, }; int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr) { int err; err = inode_change_ok(inode, attr); if (err) { dprintk("%s: ino: %llu, inode changes are not allowed.\n", __func__, POHMELFS_I(inode)->ino); goto err_out_exit; } if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { err = vmtruncate(inode, attr->ia_size); if (err) { dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino); goto err_out_exit; } } setattr_copy(inode, attr); mark_inode_dirty(inode); dprintk("%s: ino: %llu, mode: %o -> %o, uid: %u -> %u, gid: %u -> %u, size: %llu -> %llu.\n", __func__, POHMELFS_I(inode)->ino, inode->i_mode, attr->ia_mode, inode->i_uid, attr->ia_uid, inode->i_gid, attr->ia_gid, inode->i_size, attr->ia_size); return 0; err_out_exit: return err; } int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; struct pohmelfs_inode *pi = POHMELFS_I(inode); int err; err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_WRITE_LOCK); if (err) goto err_out_exit; err = security_inode_setattr(dentry, attr); if (err) goto err_out_exit; err = pohmelfs_setattr_raw(inode, attr); if (err) goto err_out_exit; return 0; err_out_exit: return err; } static int pohmelfs_send_xattr_req(struct pohmelfs_inode *pi, u64 id, u64 start, const char *name, const void *value, size_t attrsize, int 
command) { struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb); int err, path_len, namelen = strlen(name) + 1; /* 0-byte */ struct netfs_trans *t; struct netfs_cmd *cmd; void *data; dprintk("%s: id: %llu, start: %llu, name: '%s', attrsize: %zu, cmd: %d.\n", __func__, id, start, name, attrsize, command); path_len = pohmelfs_path_length(pi); if (path_len < 0) { err = path_len; goto err_out_exit; } t = netfs_trans_alloc(psb, namelen + path_len + attrsize, 0, 0); if (!t) { err = -ENOMEM; goto err_out_exit; } cmd = netfs_trans_current(t); data = cmd + 1; path_len = pohmelfs_construct_path_string(pi, data, path_len); if (path_len < 0) { err = path_len; goto err_out_put; } data += path_len; /* * 'name' is a NUL-terminated string already and * 'namelen' includes 0-byte. */ memcpy(data, name, namelen); data += namelen; memcpy(data, value, attrsize); cmd->cmd = command; cmd->id = id; cmd->start = start; cmd->size = attrsize + namelen + path_len; cmd->ext = path_len; cmd->csize = 0; cmd->cpad = 0; netfs_convert_cmd(cmd); netfs_trans_update(cmd, t, namelen + path_len + attrsize); return netfs_trans_finish(t, psb); err_out_put: t->result = err; netfs_trans_put(t); err_out_exit: return err; } static int pohmelfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t attrsize, int flags) { struct inode *inode = dentry->d_inode; struct pohmelfs_inode *pi = POHMELFS_I(inode); struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); if (!(psb->state_flags & POHMELFS_FLAGS_XATTR)) return -EOPNOTSUPP; return pohmelfs_send_xattr_req(pi, flags, attrsize, name, value, attrsize, NETFS_XATTR_SET); } static ssize_t pohmelfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t attrsize) { struct inode *inode = dentry->d_inode; struct pohmelfs_inode *pi = POHMELFS_I(inode); struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); struct pohmelfs_mcache *m; int err; long timeout = psb->mcache_timeout; if (!(psb->state_flags & POHMELFS_FLAGS_XATTR)) return -EOPNOTSUPP; m = pohmelfs_mcache_alloc(psb, 0, attrsize, value); if (IS_ERR(m)) return PTR_ERR(m); dprintk("%s: ino: %llu, name: '%s', size: %zu.\n", __func__, pi->ino, name, attrsize); err = pohmelfs_send_xattr_req(pi, m->gen, attrsize, name, value, 0, NETFS_XATTR_GET); if (err) goto err_out_put; do { err = wait_for_completion_timeout(&m->complete, timeout); if (err) { err = m->err; break; } /* * This loop is a bit ugly, since it waits until reference counter * hits 1 and then puts the object here. Main goal is to prevent race with * the network thread, when it can start processing the given request, i.e. * increase its reference counter but yet not complete it, while * we will exit from ->getxattr() with timeout, and although request * will not be freed (its reference counter was increased by network * thread), data pointer provided by user may be released, so we will * overwrite an already freed area in the network thread. * * Now after timeout we remove request from the cache, so it can not be * found by network thread, and wait for its reference counter to hit 1, * i.e. if network thread already started to process this request, we wait * for it to finish, and then free object locally. If reference counter is * already 1, i.e. request is not used by anyone else, we can free it without * problem. 
*/ err = -ETIMEDOUT; timeout = HZ; pohmelfs_mcache_remove_locked(psb, m); } while (atomic_read(&m->refcnt) != 1); pohmelfs_mcache_put(psb, m); dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err); return err; err_out_put: pohmelfs_mcache_put(psb, m); return err; } static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; #if 0 struct pohmelfs_inode *pi = POHMELFS_I(inode); int err; err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK); if (err) return err; dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n", __func__, pi->ino, inode->i_mode, inode->i_uid, inode->i_gid, inode->i_size); #endif generic_fillattr(inode, stat); return 0; } const struct inode_operations pohmelfs_file_inode_operations = { .setattr = pohmelfs_setattr, .getattr = pohmelfs_getattr, .setxattr = pohmelfs_setxattr, .getxattr = pohmelfs_getxattr, }; /* * Fill inode data: mode, size, operation callbacks and so on... */ void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info) { inode->i_mode = info->mode; inode->i_nlink = info->nlink; inode->i_uid = info->uid; inode->i_gid = info->gid; inode->i_blocks = info->blocks; inode->i_rdev = info->rdev; inode->i_size = info->size; inode->i_version = info->version; inode->i_blkbits = ffs(info->blocksize); dprintk("%s: inode: %p, num: %lu/%llu inode is regular: %d, dir: %d, link: %d, mode: %o, size: %llu.\n", __func__, inode, inode->i_ino, info->ino, S_ISREG(inode->i_mode), S_ISDIR(inode->i_mode), S_ISLNK(inode->i_mode), inode->i_mode, inode->i_size); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; /* * i_mapping is a pointer to i_data during inode initialization. */ inode->i_data.a_ops = &pohmelfs_aops; if (S_ISREG(inode->i_mode)) { inode->i_fop = &pohmelfs_file_ops; inode->i_op = &pohmelfs_file_inode_operations; } else if (S_ISDIR(inode->i_mode)) { inode->i_fop = &pohmelfs_dir_fops; inode->i_op = &pohmelfs_dir_inode_ops; } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &pohmelfs_symlink_inode_operations; inode->i_fop = &pohmelfs_file_ops; } else { inode->i_fop = &generic_ro_fops; } } static int pohmelfs_drop_inode(struct inode *inode) { struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); struct pohmelfs_inode *pi = POHMELFS_I(inode); spin_lock(&psb->ino_lock); list_del_init(&pi->inode_entry); spin_unlock(&psb->ino_lock); return generic_drop_inode(inode); } static struct pohmelfs_inode *pohmelfs_get_inode_from_list(struct pohmelfs_sb *psb, struct list_head *head, unsigned int *count) { struct pohmelfs_inode *pi = NULL; spin_lock(&psb->ino_lock); if (!list_empty(head)) { pi = list_entry(head->next, struct pohmelfs_inode, inode_entry); list_del_init(&pi->inode_entry); *count = pi->drop_count; pi->drop_count = 0; } spin_unlock(&psb->ino_lock); return pi; } static void pohmelfs_flush_transactions(struct pohmelfs_sb *psb) { struct pohmelfs_config *c; mutex_lock(&psb->state_lock); list_for_each_entry(c, &psb->state_list, config_entry) { pohmelfs_state_flush_transactions(&c->state); } mutex_unlock(&psb->state_lock); } /* * ->put_super() callback. Invoked before superblock is destroyed, * so it has to clean all private data. */ static void pohmelfs_put_super(struct super_block *sb) { struct pohmelfs_sb *psb = POHMELFS_SB(sb); struct pohmelfs_inode *pi; unsigned int count = 0; unsigned int in_drop_list = 0; struct inode *inode, *tmp; dprintk("%s.\n", __func__); /* * Kill pending transactions, which could affect inodes in-flight. 
*/ pohmelfs_flush_transactions(psb); while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count))) { inode = &pi->vfs_inode; dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n", __func__, pi->ino, pi, inode, count); if (atomic_read(&inode->i_count) != count) { printk("%s: ino: %llu, pi: %p, inode: %p, count: %u, i_count: %d.\n", __func__, pi->ino, pi, inode, count, atomic_read(&inode->i_count)); count = atomic_read(&inode->i_count); in_drop_list++; } while (count--) iput(&pi->vfs_inode); } list_for_each_entry_safe(inode, tmp, &sb->s_inodes, i_sb_list) { pi = POHMELFS_I(inode); dprintk("%s: ino: %llu, pi: %p, inode: %p, i_count: %u.\n", __func__, pi->ino, pi, inode, atomic_read(&inode->i_count)); /* * These are special inodes, they were created during * directory reading or lookup, and were not bound to dentry, * so they live here with reference counter being 1 and prevent * umount from succeed since it believes that they are busy. */ count = atomic_read(&inode->i_count); if (count) { list_del_init(&inode->i_sb_list); while (count--) iput(&pi->vfs_inode); } } psb->trans_scan_timeout = psb->drop_scan_timeout = 0; cancel_delayed_work_sync(&psb->dwork); cancel_delayed_work_sync(&psb->drop_dwork); flush_scheduled_work(); dprintk("%s: stopped workqueues.\n", __func__); pohmelfs_crypto_exit(psb); pohmelfs_state_exit(psb); bdi_destroy(&psb->bdi); kfree(psb); sb->s_fs_info = NULL; } static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct pohmelfs_sb *psb = POHMELFS_SB(sb); /* * There are no filesystem size limits yet. */ memset(buf, 0, sizeof(struct kstatfs)); buf->f_type = POHMELFS_MAGIC_NUM; /* 'POH.' */ buf->f_bsize = sb->s_blocksize; buf->f_files = psb->ino; buf->f_namelen = 255; buf->f_files = atomic_long_read(&psb->total_inodes); buf->f_bfree = buf->f_bavail = psb->avail_size >> PAGE_SHIFT; buf->f_blocks = psb->total_size >> PAGE_SHIFT; dprintk("%s: total: %llu, avail: %llu, inodes: %llu, bsize: %lu.\n", __func__, psb->total_size, psb->avail_size, buf->f_files, sb->s_blocksize); return 0; } static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs) { struct pohmelfs_sb *psb = POHMELFS_SB(vfs->mnt_sb); seq_printf(seq, ",idx=%u", psb->idx); seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout)); seq_printf(seq, ",drop_scan_timeout=%u", jiffies_to_msecs(psb->drop_scan_timeout)); seq_printf(seq, ",wait_on_page_timeout=%u", jiffies_to_msecs(psb->wait_on_page_timeout)); seq_printf(seq, ",trans_retries=%u", psb->trans_retries); seq_printf(seq, ",crypto_thread_num=%u", psb->crypto_thread_num); seq_printf(seq, ",trans_max_pages=%u", psb->trans_max_pages); seq_printf(seq, ",mcache_timeout=%u", jiffies_to_msecs(psb->mcache_timeout)); if (psb->crypto_fail_unsupported) seq_printf(seq, ",crypto_fail_unsupported"); return 0; } enum { pohmelfs_opt_idx, pohmelfs_opt_crypto_thread_num, pohmelfs_opt_trans_max_pages, pohmelfs_opt_crypto_fail_unsupported, /* Remountable options */ pohmelfs_opt_trans_scan_timeout, pohmelfs_opt_drop_scan_timeout, pohmelfs_opt_wait_on_page_timeout, pohmelfs_opt_trans_retries, pohmelfs_opt_mcache_timeout, }; static struct match_token pohmelfs_tokens[] = { {pohmelfs_opt_idx, "idx=%u"}, {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"}, {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"}, {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"}, {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"}, {pohmelfs_opt_drop_scan_timeout, 
"drop_scan_timeout=%u"}, {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"}, {pohmelfs_opt_trans_retries, "trans_retries=%u"}, {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"}, }; static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount) { char *p; substring_t args[MAX_OPT_ARGS]; int option, err; if (!options) return 0; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; token = match_token(p, pohmelfs_tokens, args); err = match_int(&args[0], &option); if (err) return err; if (remount && token <= pohmelfs_opt_crypto_fail_unsupported) continue; switch (token) { case pohmelfs_opt_idx: psb->idx = option; break; case pohmelfs_opt_trans_scan_timeout: psb->trans_scan_timeout = msecs_to_jiffies(option); break; case pohmelfs_opt_drop_scan_timeout: psb->drop_scan_timeout = msecs_to_jiffies(option); break; case pohmelfs_opt_wait_on_page_timeout: psb->wait_on_page_timeout = msecs_to_jiffies(option); break; case pohmelfs_opt_mcache_timeout: psb->mcache_timeout = msecs_to_jiffies(option); break; case pohmelfs_opt_trans_retries: psb->trans_retries = option; break; case pohmelfs_opt_crypto_thread_num: psb->crypto_thread_num = option; break; case pohmelfs_opt_trans_max_pages: psb->trans_max_pages = option; break; case pohmelfs_opt_crypto_fail_unsupported: psb->crypto_fail_unsupported = 1; break; default: return -EINVAL; } } return 0; } static int pohmelfs_remount(struct super_block *sb, int *flags, char *data) { int err; struct pohmelfs_sb *psb = POHMELFS_SB(sb); unsigned long old_sb_flags = sb->s_flags; err = pohmelfs_parse_options(data, psb, 1); if (err) goto err_out_restore; if (!(*flags & MS_RDONLY)) sb->s_flags &= ~MS_RDONLY; return 0; err_out_restore: sb->s_flags = old_sb_flags; return err; } static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count) { struct inode *inode = &pi->vfs_inode; dprintk("%s: %p: ino: %llu, owned: %d.\n", __func__, inode, pi->ino, test_bit(NETFS_INODE_OWNED, &pi->state)); mutex_lock(&inode->i_mutex); if (test_and_clear_bit(NETFS_INODE_OWNED, &pi->state)) { filemap_fdatawrite(inode->i_mapping); inode->i_sb->s_op->write_inode(inode, 0); } #ifdef POHMELFS_TRUNCATE_ON_INODE_FLUSH truncate_inode_pages(inode->i_mapping, 0); #endif pohmelfs_data_unlock(pi, 0, ~0, POHMELFS_WRITE_LOCK); mutex_unlock(&inode->i_mutex); } static void pohmelfs_put_inode_count(struct pohmelfs_inode *pi, unsigned int count) { dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n", __func__, pi->ino, pi, &pi->vfs_inode, count); if (test_and_clear_bit(NETFS_INODE_NEED_FLUSH, &pi->state)) pohmelfs_flush_inode(pi, count); while (count--) iput(&pi->vfs_inode); } static void pohmelfs_drop_scan(struct work_struct *work) { struct pohmelfs_sb *psb = container_of(work, struct pohmelfs_sb, drop_dwork.work); struct pohmelfs_inode *pi; unsigned int count = 0; while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count))) pohmelfs_put_inode_count(pi, count); pohmelfs_check_states(psb); if (psb->drop_scan_timeout) schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout); } /* * Run through all transactions starting from the oldest, * drop transaction from current state and try to send it * to all remote nodes, which are currently installed. 
*/ static void pohmelfs_trans_scan_state(struct netfs_state *st) { struct rb_node *rb_node; struct netfs_trans_dst *dst; struct pohmelfs_sb *psb = st->psb; unsigned int timeout = psb->trans_scan_timeout; struct netfs_trans *t; int err; mutex_lock(&st->trans_lock); for (rb_node = rb_first(&st->trans_root); rb_node; ) { dst = rb_entry(rb_node, struct netfs_trans_dst, state_entry); t = dst->trans; if (timeout && time_after(dst->send_time + timeout, jiffies) && dst->retries == 0) break; dprintk("%s: t: %p, gen: %u, st: %p, retries: %u, max: %u.\n", __func__, t, t->gen, st, dst->retries, psb->trans_retries); netfs_trans_get(t); rb_node = rb_next(rb_node); err = -ETIMEDOUT; if (timeout && (++dst->retries < psb->trans_retries)) err = netfs_trans_resend(t, psb); if (err || (t->flags & NETFS_TRANS_SINGLE_DST)) { if (netfs_trans_remove_nolock(dst, st)) netfs_trans_drop_dst_nostate(dst); } t->result = err; netfs_trans_put(t); } mutex_unlock(&st->trans_lock); } /* * Walk through all installed network states and resend all * transactions, which are old enough. */ static void pohmelfs_trans_scan(struct work_struct *work) { struct pohmelfs_sb *psb = container_of(work, struct pohmelfs_sb, dwork.work); struct netfs_state *st; struct pohmelfs_config *c; mutex_lock(&psb->state_lock); list_for_each_entry(c, &psb->state_list, config_entry) { st = &c->state; pohmelfs_trans_scan_state(st); } mutex_unlock(&psb->state_lock); /* * If no timeout specified then system is in the middle of umount process, * so no need to reschedule scanning process again. */ if (psb->trans_scan_timeout) schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout); } int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon, unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start) { struct inode *inode = &pi->vfs_inode; struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb); int err = 0, sz; struct netfs_trans *t; int path_len, addon_len = 0; void *data; struct netfs_inode_info *info; struct netfs_cmd *cmd; dprintk("%s: ino: %llu, cmd: %u, addon: %p.\n", __func__, pi->ino, cmd_op, addon); path_len = pohmelfs_path_length(pi); if (path_len < 0) { err = path_len; goto err_out_exit; } if (addon) addon_len = strlen(addon) + 1; /* 0-byte */ sz = addon_len; if (cmd_op == NETFS_INODE_INFO) sz += sizeof(struct netfs_inode_info); t = netfs_trans_alloc(psb, sz + path_len, flags, 0); if (!t) { err = -ENOMEM; goto err_out_exit; } t->complete = complete; t->private = priv; cmd = netfs_trans_current(t); data = (void *)(cmd + 1); if (cmd_op == NETFS_INODE_INFO) { info = (struct netfs_inode_info *)(cmd + 1); data = (void *)(info + 1); /* * We are under i_mutex, can read and change whatever we want... 
*/ info->mode = inode->i_mode; info->nlink = inode->i_nlink; info->uid = inode->i_uid; info->gid = inode->i_gid; info->blocks = inode->i_blocks; info->rdev = inode->i_rdev; info->size = inode->i_size; info->version = inode->i_version; netfs_convert_inode_info(info); } path_len = pohmelfs_construct_path_string(pi, data, path_len); if (path_len < 0) goto err_out_free; dprintk("%s: path_len: %d.\n", __func__, path_len); if (addon) { path_len--; /* Do not place null-byte before the addon */ path_len += sprintf(data + path_len, "/%s", addon) + 1; /* 0 - byte */ } sz += path_len; cmd->cmd = cmd_op; cmd->ext = path_len; cmd->size = sz; cmd->id = id; cmd->start = start; netfs_convert_cmd(cmd); netfs_trans_update(cmd, t, sz); /* * Note, that it is possible to leak error here: transaction callback will not * be invoked for allocation path failure. */ return netfs_trans_finish(t, psb); err_out_free: netfs_trans_free(t); err_out_exit: if (complete) complete(NULL, 0, priv, err); return err; } int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start) { return pohmelfs_meta_command_data(pi, pi->ino, cmd_op, NULL, flags, complete, priv, start); } /* * Send request and wait for POHMELFS root capabilities response, * which will update server's informaion about size of the export, * permissions, number of objects, available size and so on. */ static int pohmelfs_root_handshake(struct pohmelfs_sb *psb) { struct netfs_trans *t; struct netfs_cmd *cmd; int err = -ENOMEM; t = netfs_trans_alloc(psb, 0, 0, 0); if (!t) goto err_out_exit; cmd = netfs_trans_current(t); cmd->cmd = NETFS_CAPABILITIES; cmd->id = POHMELFS_ROOT_CAPABILITIES; cmd->size = 0; cmd->start = 0; cmd->ext = 0; cmd->csize = 0; netfs_convert_cmd(cmd); netfs_trans_update(cmd, t, 0); err = netfs_trans_finish(t, psb); if (err) goto err_out_exit; psb->flags = ~0; err = wait_event_interruptible_timeout(psb->wait, (psb->flags != ~0), psb->wait_on_page_timeout); if (!err) err = -ETIMEDOUT; else if (err > 0) err = -psb->flags; if (err) goto err_out_exit; return 0; err_out_exit: return err; } static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt) { struct netfs_state *st; struct pohmelfs_ctl *ctl; struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb); struct pohmelfs_config *c; mutex_lock(&psb->state_lock); seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n"); list_for_each_entry(c, &psb->state_list, config_entry) { st = &c->state; ctl = &st->ctl; seq_printf(m, "%u ", ctl->idx); if (ctl->addr.sa_family == AF_INET) { struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr; seq_printf(m, "%pI4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port)); } else if (ctl->addr.sa_family == AF_INET6) { struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr; seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port)); } else { unsigned int i; for (i = 0; i < ctl->addrlen; ++i) seq_printf(m, "%02x.", ctl->addr.addr[i]); } seq_printf(m, " %u %u %d %u %x\n", ctl->type, ctl->proto, st->socket != NULL, ctl->prio, ctl->perm); } mutex_unlock(&psb->state_lock); return 0; } static const struct super_operations pohmelfs_sb_ops = { .alloc_inode = pohmelfs_alloc_inode, .destroy_inode = pohmelfs_destroy_inode, .drop_inode = pohmelfs_drop_inode, .write_inode = pohmelfs_write_inode, .put_super = pohmelfs_put_super, .remount_fs = pohmelfs_remount, .statfs = pohmelfs_statfs, .show_options = pohmelfs_show_options, .show_stats = 
pohmelfs_show_stats, }; /* * Allocate private superblock and create root dir. */ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent) { struct pohmelfs_sb *psb; int err = -ENOMEM; struct inode *root; struct pohmelfs_inode *npi; struct qstr str; psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL); if (!psb) goto err_out_exit; err = bdi_init(&psb->bdi); if (err) goto err_out_free_sb; err = bdi_register(&psb->bdi, NULL, "pfs-%d", atomic_inc_return(&psb_bdi_num)); if (err) { bdi_destroy(&psb->bdi); goto err_out_free_sb; } sb->s_fs_info = psb; sb->s_op = &pohmelfs_sb_ops; sb->s_magic = POHMELFS_MAGIC_NUM; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; sb->s_bdi = &psb->bdi; psb->sb = sb; psb->ino = 2; psb->idx = 0; psb->active_state = NULL; psb->trans_retries = 5; psb->trans_data_size = PAGE_SIZE; psb->drop_scan_timeout = msecs_to_jiffies(1000); psb->trans_scan_timeout = msecs_to_jiffies(5000); psb->wait_on_page_timeout = msecs_to_jiffies(5000); init_waitqueue_head(&psb->wait); spin_lock_init(&psb->ino_lock); INIT_LIST_HEAD(&psb->drop_list); mutex_init(&psb->mcache_lock); psb->mcache_root = RB_ROOT; psb->mcache_timeout = msecs_to_jiffies(5000); atomic_long_set(&psb->mcache_gen, 0); psb->trans_max_pages = 100; psb->crypto_align_size = 16; psb->crypto_attached_size = 0; psb->hash_strlen = 0; psb->cipher_strlen = 0; psb->perform_crypto = 0; psb->crypto_thread_num = 2; psb->crypto_fail_unsupported = 0; mutex_init(&psb->crypto_thread_lock); INIT_LIST_HEAD(&psb->crypto_ready_list); INIT_LIST_HEAD(&psb->crypto_active_list); atomic_set(&psb->trans_gen, 1); atomic_long_set(&psb->total_inodes, 0); mutex_init(&psb->state_lock); INIT_LIST_HEAD(&psb->state_list); err = pohmelfs_parse_options((char *) data, psb, 0); if (err) goto err_out_free_bdi; err = pohmelfs_copy_crypto(psb); if (err) goto err_out_free_bdi; err = pohmelfs_state_init(psb); if (err) goto err_out_free_strings; err = pohmelfs_crypto_init(psb); if (err) goto err_out_state_exit; err = pohmelfs_root_handshake(psb); if (err) goto err_out_crypto_exit; str.name = "/"; str.hash = jhash("/", 1, 0); str.len = 1; npi = pohmelfs_create_entry_local(psb, NULL, &str, 0, 0755|S_IFDIR); if (IS_ERR(npi)) { err = PTR_ERR(npi); goto err_out_crypto_exit; } set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state); clear_bit(NETFS_INODE_OWNED, &npi->state); root = &npi->vfs_inode; sb->s_root = d_alloc_root(root); if (!sb->s_root) goto err_out_put_root; INIT_DELAYED_WORK(&psb->drop_dwork, pohmelfs_drop_scan); schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout); INIT_DELAYED_WORK(&psb->dwork, pohmelfs_trans_scan); schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout); return 0; err_out_put_root: iput(root); err_out_crypto_exit: pohmelfs_crypto_exit(psb); err_out_state_exit: pohmelfs_state_exit(psb); err_out_free_strings: kfree(psb->cipher_string); kfree(psb->hash_string); err_out_free_bdi: bdi_destroy(&psb->bdi); err_out_free_sb: kfree(psb); err_out_exit: dprintk("%s: err: %d.\n", __func__, err); return err; } /* * Some VFS magic here... */ static struct dentry *pohmelfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_nodev(fs_type, flags, data, pohmelfs_fill_super); } /* * We need this to sync all inodes earlier, since when writeback * is invoked from the umount/mntput path dcache is already shrunk, * see generic_shutdown_super(), and no inodes can access the path. 
*/ static void pohmelfs_kill_super(struct super_block *sb) { sync_inodes_sb(sb); kill_anon_super(sb); } static struct file_system_type pohmel_fs_type = { .owner = THIS_MODULE, .name = "pohmel", .mount = pohmelfs_mount, .kill_sb = pohmelfs_kill_super, }; /* * Cache and module initializations and freeing routings. */ static void pohmelfs_init_once(void *data) { struct pohmelfs_inode *pi = data; inode_init_once(&pi->vfs_inode); } static int __init pohmelfs_init_inodecache(void) { pohmelfs_inode_cache = kmem_cache_create("pohmelfs_inode_cache", sizeof(struct pohmelfs_inode), 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), pohmelfs_init_once); if (!pohmelfs_inode_cache) return -ENOMEM; return 0; } static void pohmelfs_destroy_inodecache(void) { kmem_cache_destroy(pohmelfs_inode_cache); } static int __init init_pohmel_fs(void) { int err; err = pohmelfs_config_init(); if (err) goto err_out_exit; err = pohmelfs_init_inodecache(); if (err) goto err_out_config_exit; err = pohmelfs_mcache_init(); if (err) goto err_out_destroy; err = netfs_trans_init(); if (err) goto err_out_mcache_exit; err = register_filesystem(&pohmel_fs_type); if (err) goto err_out_trans; return 0; err_out_trans: netfs_trans_exit(); err_out_mcache_exit: pohmelfs_mcache_exit(); err_out_destroy: pohmelfs_destroy_inodecache(); err_out_config_exit: pohmelfs_config_exit(); err_out_exit: return err; } static void __exit exit_pohmel_fs(void) { unregister_filesystem(&pohmel_fs_type); pohmelfs_destroy_inodecache(); pohmelfs_mcache_exit(); pohmelfs_config_exit(); netfs_trans_exit(); } module_init(init_pohmel_fs); module_exit(exit_pohmel_fs); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Pohmel filesystem");
gpl-2.0
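The pohmelfs_parse_options() routine above splits the mount data string on commas and uses the kernel's match_token()/match_int() helpers from <linux/parser.h> to map each "key=value" pair onto a pohmelfs_sb field, converting the timeout options to jiffies with msecs_to_jiffies() and skipping the pre-crypto options on remount. The following is a minimal userspace sketch of the same strsep()-based splitting pattern; the option names and the struct are illustrative stand-ins, not the kernel's definitions, and plain sscanf() replaces match_int().

/* Minimal userspace sketch of comma-separated "key=value" option parsing,
 * mirroring the strsep() loop used by pohmelfs_parse_options(). */
#include <stdio.h>
#include <string.h>

struct opts {
        unsigned int idx;
        unsigned int trans_scan_timeout_ms;
        unsigned int trans_retries;
};

static int parse_options(char *options, struct opts *o)
{
        char *p;

        while ((p = strsep(&options, ",")) != NULL) {
                if (!*p)
                        continue;       /* skip empty tokens, as the kernel loop does */
                if (sscanf(p, "idx=%u", &o->idx) == 1)
                        continue;
                if (sscanf(p, "trans_scan_timeout=%u", &o->trans_scan_timeout_ms) == 1)
                        continue;
                if (sscanf(p, "trans_retries=%u", &o->trans_retries) == 1)
                        continue;
                return -1;              /* unknown option -> -EINVAL in the kernel code */
        }
        return 0;
}

int main(void)
{
        char buf[] = "idx=1,trans_scan_timeout=5000,trans_retries=5";
        struct opts o = { 0 };

        if (parse_options(buf, &o))
                return 1;
        printf("idx=%u scan=%ums retries=%u\n",
               o.idx, o.trans_scan_timeout_ms, o.trans_retries);
        return 0;
}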
mrg666/android_kernel_icon
drivers/staging/iio/magnetometer/ak8975.c
2295
15396
/* * A sensor driver for the magnetometer AK8975. * * Magnetic compass sensor driver for monitoring magnetic flux information. * * Copyright (c) 2010, NVIDIA Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/gpio.h> #include "../iio.h" #include "magnet.h" /* * Register definitions, as well as various shifts and masks to get at the * individual fields of the registers. */ #define AK8975_REG_WIA 0x00 #define AK8975_DEVICE_ID 0x48 #define AK8975_REG_INFO 0x01 #define AK8975_REG_ST1 0x02 #define AK8975_REG_ST1_DRDY_SHIFT 0 #define AK8975_REG_ST1_DRDY_MASK (1 << AK8975_REG_ST1_DRDY_SHIFT) #define AK8975_REG_HXL 0x03 #define AK8975_REG_HXH 0x04 #define AK8975_REG_HYL 0x05 #define AK8975_REG_HYH 0x06 #define AK8975_REG_HZL 0x07 #define AK8975_REG_HZH 0x08 #define AK8975_REG_ST2 0x09 #define AK8975_REG_ST2_DERR_SHIFT 2 #define AK8975_REG_ST2_DERR_MASK (1 << AK8975_REG_ST2_DERR_SHIFT) #define AK8975_REG_ST2_HOFL_SHIFT 3 #define AK8975_REG_ST2_HOFL_MASK (1 << AK8975_REG_ST2_HOFL_SHIFT) #define AK8975_REG_CNTL 0x0A #define AK8975_REG_CNTL_MODE_SHIFT 0 #define AK8975_REG_CNTL_MODE_MASK (0xF << AK8975_REG_CNTL_MODE_SHIFT) #define AK8975_REG_CNTL_MODE_POWER_DOWN 0 #define AK8975_REG_CNTL_MODE_ONCE 1 #define AK8975_REG_CNTL_MODE_SELF_TEST 8 #define AK8975_REG_CNTL_MODE_FUSE_ROM 0xF #define AK8975_REG_RSVC 0x0B #define AK8975_REG_ASTC 0x0C #define AK8975_REG_TS1 0x0D #define AK8975_REG_TS2 0x0E #define AK8975_REG_I2CDIS 0x0F #define AK8975_REG_ASAX 0x10 #define AK8975_REG_ASAY 0x11 #define AK8975_REG_ASAZ 0x12 #define AK8975_MAX_REGS AK8975_REG_ASAZ /* * Miscellaneous values. */ #define AK8975_MAX_CONVERSION_TIMEOUT 500 #define AK8975_CONVERSION_DONE_POLL_TIME 10 /* * Per-instance context data for the device. */ struct ak8975_data { struct i2c_client *client; struct iio_dev *indio_dev; struct attribute_group attrs; struct mutex lock; u8 asa[3]; long raw_to_gauss[3]; unsigned long mode; u8 reg_cache[AK8975_MAX_REGS]; int eoc_gpio; int eoc_irq; }; /* * Helper function to write to the I2C device's registers. */ static int ak8975_write_data(struct i2c_client *client, u8 reg, u8 val, u8 mask, u8 shift) { u8 regval; struct i2c_msg msg; u8 w_data[2]; int ret = 0; struct ak8975_data *data = i2c_get_clientdata(client); regval = data->reg_cache[reg]; regval &= ~mask; regval |= val << shift; w_data[0] = reg; w_data[1] = regval; msg.addr = client->addr; msg.flags = 0; msg.len = 2; msg.buf = w_data; ret = i2c_transfer(client->adapter, &msg, 1); if (ret < 0) { dev_err(&client->dev, "Write to device fails status %x\n", ret); return ret; } data->reg_cache[reg] = regval; return 0; } /* * Helper function to read a contiguous set of the I2C device's registers. 
*/ static int ak8975_read_data(struct i2c_client *client, u8 reg, u8 length, u8 *buffer) { struct i2c_msg msg[2]; u8 w_data[2]; int ret; w_data[0] = reg; msg[0].addr = client->addr; msg[0].flags = I2C_M_NOSTART; /* set repeated start and write */ msg[0].len = 1; msg[0].buf = w_data; msg[1].addr = client->addr; msg[1].flags = I2C_M_RD; msg[1].len = length; msg[1].buf = buffer; ret = i2c_transfer(client->adapter, msg, 2); if (ret < 0) { dev_err(&client->dev, "Read from device fails\n"); return ret; } return 0; } /* * Perform some start-of-day setup, including reading the asa calibration * values and caching them. */ static int ak8975_setup(struct i2c_client *client) { struct ak8975_data *data = i2c_get_clientdata(client); u8 device_id; int ret; /* Confirm that the device we're talking to is really an AK8975. */ ret = ak8975_read_data(client, AK8975_REG_WIA, 1, &device_id); if (ret < 0) { dev_err(&client->dev, "Error reading WIA\n"); return ret; } if (device_id != AK8975_DEVICE_ID) { dev_err(&client->dev, "Device ak8975 not found\n"); return -ENODEV; } /* Write the fused rom access mode. */ ret = ak8975_write_data(client, AK8975_REG_CNTL, AK8975_REG_CNTL_MODE_FUSE_ROM, AK8975_REG_CNTL_MODE_MASK, AK8975_REG_CNTL_MODE_SHIFT); if (ret < 0) { dev_err(&client->dev, "Error in setting fuse access mode\n"); return ret; } /* Get asa data and store in the device data. */ ret = ak8975_read_data(client, AK8975_REG_ASAX, 3, data->asa); if (ret < 0) { dev_err(&client->dev, "Not able to read asa data\n"); return ret; } /* Precalculate scale factor for each axis and store in the device data. */ data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8; data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8; data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8; return 0; } /* * Shows the device's mode. 0 = off, 1 = on. */ static ssize_t show_mode(struct device *dev, struct device_attribute *devattr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ak8975_data *data = indio_dev->dev_data; return sprintf(buf, "%lu\n", data->mode); } /* * Sets the device's mode. 0 = off, 1 = on. The device's mode must be on * for the magn raw attributes to be available. */ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ak8975_data *data = indio_dev->dev_data; struct i2c_client *client = data->client; unsigned long oval; int ret; /* Convert mode string and do some basic sanity checking on it. only 0 or 1 are valid. */ if (strict_strtoul(buf, 10, &oval)) return -EINVAL; if (oval > 1) { dev_err(dev, "mode value is not supported\n"); return -EINVAL; } mutex_lock(&data->lock); /* Write the mode to the device. */ if (data->mode != oval) { ret = ak8975_write_data(client, AK8975_REG_CNTL, (u8)oval, AK8975_REG_CNTL_MODE_MASK, AK8975_REG_CNTL_MODE_SHIFT); if (ret < 0) { dev_err(&client->dev, "Error in setting mode\n"); mutex_unlock(&data->lock); return ret; } data->mode = oval; } mutex_unlock(&data->lock); return count; } /* * Emits the scale factor to bring the raw value into Gauss units. * * This scale factor is axis-dependent, and is derived from 3 calibration * factors ASA(x), ASA(y), and ASA(z). * * These ASA values are read from the sensor device at start of day, and * cached in the device context struct. 
* * Adjusting the flux value with the sensitivity adjustment value should be * done via the following formula: * * Hadj = H * ( ( ( (ASA-128)*0.5 ) / 128 ) + 1 ) * * where H is the raw value, ASA is the sensitivity adjustment, and Hadj * is the resultant adjusted value. * * We reduce the formula to: * * Hadj = H * (ASA + 128) / 256 * * H is in the range of -4096 to 4095. The magnetometer has a range of * +-1229uT. To go from the raw value to uT is: * * HuT = H * 1229/4096, or roughly, 3/10. * * Since 1uT = 100 gauss, our final scale factor becomes: * * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100 * Hadj = H * ((ASA + 128) * 30 / 256 * * Since ASA doesn't change, we cache the resultant scale factor into the * device context in ak8975_setup(). */ static ssize_t show_scale(struct device *dev, struct device_attribute *devattr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ak8975_data *data = indio_dev->dev_data; struct iio_dev_attr *this_attr = to_iio_dev_attr(devattr); return sprintf(buf, "%ld\n", data->raw_to_gauss[this_attr->address]); } static int wait_conversion_complete_gpio(struct ak8975_data *data) { struct i2c_client *client = data->client; u8 read_status; u32 timeout_ms = AK8975_MAX_CONVERSION_TIMEOUT; int ret; /* Wait for the conversion to complete. */ while (timeout_ms) { msleep(AK8975_CONVERSION_DONE_POLL_TIME); if (gpio_get_value(data->eoc_gpio)) break; timeout_ms -= AK8975_CONVERSION_DONE_POLL_TIME; } if (!timeout_ms) { dev_err(&client->dev, "Conversion timeout happened\n"); return -EINVAL; } ret = ak8975_read_data(client, AK8975_REG_ST1, 1, &read_status); if (ret < 0) { dev_err(&client->dev, "Error in reading ST1\n"); return ret; } return read_status; } static int wait_conversion_complete_polled(struct ak8975_data *data) { struct i2c_client *client = data->client; u8 read_status; u32 timeout_ms = AK8975_MAX_CONVERSION_TIMEOUT; int ret; /* Wait for the conversion to complete. */ while (timeout_ms) { msleep(AK8975_CONVERSION_DONE_POLL_TIME); ret = ak8975_read_data(client, AK8975_REG_ST1, 1, &read_status); if (ret < 0) { dev_err(&client->dev, "Error in reading ST1\n"); return ret; } if (read_status) break; timeout_ms -= AK8975_CONVERSION_DONE_POLL_TIME; } if (!timeout_ms) { dev_err(&client->dev, "Conversion timeout happened\n"); return -EINVAL; } return read_status; } /* * Emits the raw flux value for the x, y, or z axis. */ static ssize_t show_raw(struct device *dev, struct device_attribute *devattr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct ak8975_data *data = indio_dev->dev_data; struct i2c_client *client = data->client; struct iio_dev_attr *this_attr = to_iio_dev_attr(devattr); u16 meas_reg; s16 raw; u8 read_status; int ret; mutex_lock(&data->lock); if (data->mode == 0) { dev_err(&client->dev, "Operating mode is in power down mode\n"); ret = -EBUSY; goto exit; } /* Set up the device for taking a sample. */ ret = ak8975_write_data(client, AK8975_REG_CNTL, AK8975_REG_CNTL_MODE_ONCE, AK8975_REG_CNTL_MODE_MASK, AK8975_REG_CNTL_MODE_SHIFT); if (ret < 0) { dev_err(&client->dev, "Error in setting operating mode\n"); goto exit; } /* Wait for the conversion to complete. 
*/ if (data->eoc_gpio) ret = wait_conversion_complete_gpio(data); else ret = wait_conversion_complete_polled(data); if (ret < 0) goto exit; read_status = ret; if (read_status & AK8975_REG_ST1_DRDY_MASK) { ret = ak8975_read_data(client, AK8975_REG_ST2, 1, &read_status); if (ret < 0) { dev_err(&client->dev, "Error in reading ST2\n"); goto exit; } if (read_status & (AK8975_REG_ST2_DERR_MASK | AK8975_REG_ST2_HOFL_MASK)) { dev_err(&client->dev, "ST2 status error 0x%x\n", read_status); ret = -EINVAL; goto exit; } } /* Read the flux value from the appropriate register (the register is specified in the iio device attributes). */ ret = ak8975_read_data(client, this_attr->address, 2, (u8 *)&meas_reg); if (ret < 0) { dev_err(&client->dev, "Read axis data fails\n"); goto exit; } mutex_unlock(&data->lock); /* Endian conversion of the measured values. */ raw = (s16) (le16_to_cpu(meas_reg)); /* Clamp to valid range. */ raw = clamp_t(s16, raw, -4096, 4095); return sprintf(buf, "%d\n", raw); exit: mutex_unlock(&data->lock); return ret; } static IIO_DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, store_mode, 0); static IIO_DEV_ATTR_MAGN_X_SCALE(S_IRUGO, show_scale, NULL, 0); static IIO_DEV_ATTR_MAGN_Y_SCALE(S_IRUGO, show_scale, NULL, 1); static IIO_DEV_ATTR_MAGN_Z_SCALE(S_IRUGO, show_scale, NULL, 2); static IIO_DEV_ATTR_MAGN_X(show_raw, AK8975_REG_HXL); static IIO_DEV_ATTR_MAGN_Y(show_raw, AK8975_REG_HYL); static IIO_DEV_ATTR_MAGN_Z(show_raw, AK8975_REG_HZL); static struct attribute *ak8975_attr[] = { &iio_dev_attr_mode.dev_attr.attr, &iio_dev_attr_magn_x_scale.dev_attr.attr, &iio_dev_attr_magn_y_scale.dev_attr.attr, &iio_dev_attr_magn_z_scale.dev_attr.attr, &iio_dev_attr_magn_x_raw.dev_attr.attr, &iio_dev_attr_magn_y_raw.dev_attr.attr, &iio_dev_attr_magn_z_raw.dev_attr.attr, NULL }; static struct attribute_group ak8975_attr_group = { .attrs = ak8975_attr, }; static const struct iio_info ak8975_info = { .attrs = &ak8975_attr_group, .driver_module = THIS_MODULE, }; static int ak8975_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ak8975_data *data; int err; /* Allocate our device context. */ data = kzalloc(sizeof(struct ak8975_data), GFP_KERNEL); if (!data) { dev_err(&client->dev, "Memory allocation fails\n"); err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); data->client = client; mutex_init(&data->lock); /* Grab and set up the supplied GPIO. */ data->eoc_irq = client->irq; data->eoc_gpio = irq_to_gpio(client->irq); /* We may not have a GPIO based IRQ to scan, that is fine, we will poll if so */ if (data->eoc_gpio > 0) { err = gpio_request(data->eoc_gpio, "ak_8975"); if (err < 0) { dev_err(&client->dev, "failed to request GPIO %d, error %d\n", data->eoc_gpio, err); goto exit_free; } err = gpio_direction_input(data->eoc_gpio); if (err < 0) { dev_err(&client->dev, "Failed to configure input direction for GPIO %d, error %d\n", data->eoc_gpio, err); goto exit_gpio; } } else data->eoc_gpio = 0; /* No GPIO available */ /* Perform some basic start-of-day setup of the device. 
*/ err = ak8975_setup(client); if (err < 0) { dev_err(&client->dev, "AK8975 initialization fails\n"); goto exit_gpio; } /* Register with IIO */ data->indio_dev = iio_allocate_device(0); if (data->indio_dev == NULL) { err = -ENOMEM; goto exit_gpio; } data->indio_dev->dev.parent = &client->dev; data->indio_dev->info = &ak8975_info; data->indio_dev->dev_data = (void *)(data); data->indio_dev->modes = INDIO_DIRECT_MODE; err = iio_device_register(data->indio_dev); if (err < 0) goto exit_free_iio; return 0; exit_free_iio: iio_free_device(data->indio_dev); exit_gpio: if (data->eoc_gpio) gpio_free(data->eoc_gpio); exit_free: kfree(data); exit: return err; } static int ak8975_remove(struct i2c_client *client) { struct ak8975_data *data = i2c_get_clientdata(client); iio_device_unregister(data->indio_dev); iio_free_device(data->indio_dev); if (data->eoc_gpio) gpio_free(data->eoc_gpio); kfree(data); return 0; } static const struct i2c_device_id ak8975_id[] = { {"ak8975", 0}, {} }; MODULE_DEVICE_TABLE(i2c, ak8975_id); static struct i2c_driver ak8975_driver = { .driver = { .name = "ak8975", }, .probe = ak8975_probe, .remove = __devexit_p(ak8975_remove), .id_table = ak8975_id, }; static int __init ak8975_init(void) { return i2c_add_driver(&ak8975_driver); } static void __exit ak8975_exit(void) { i2c_del_driver(&ak8975_driver); } module_init(ak8975_init); module_exit(ak8975_exit); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_DESCRIPTION("AK8975 magnetometer driver"); MODULE_LICENSE("GPL");
gpl-2.0
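The long comment before show_scale() above derives the AK8975 sensitivity adjustment, Hadj = H * (ASA + 128) / 256, and folds the raw-to-flux conversion into it, which is why ak8975_setup() caches raw_to_gauss = ((asa + 128) * 30) >> 8 for each axis. A small standalone check of that integer arithmetic (the ASA fuse-ROM values below are made up, not read from hardware):

/* Standalone check of the AK8975 per-axis scale factor used above:
 * raw_to_gauss = ((ASA + 128) * 30) >> 8, i.e. (ASA + 128) / 256 * 30. */
#include <stdio.h>

static long raw_to_scale(unsigned char asa)
{
        return ((asa + 128) * 30) >> 8;
}

int main(void)
{
        unsigned char asa[3] = { 128, 170, 96 };        /* hypothetical fuse-ROM data */
        short raw = 1024;                               /* one sample, range -4096..4095 */
        int i;

        for (i = 0; i < 3; i++)
                printf("axis %d: scale %ld, raw %d -> %ld\n",
                       i, raw_to_scale(asa[i]), raw, raw * raw_to_scale(asa[i]));
        return 0;
}

With ASA = 128 this yields the nominal factor of 30; larger ASA values scale the axis up slightly, matching the (ASA - 128) * 0.5 / 128 + 1 form in the comment.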
broodplank/samsung-kernel-jfltexx
kernel/pid_namespace.c
4087
6016
/* * Pid namespaces * * Authors: * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM * Many thanks to Oleg Nesterov for comments and help * */ #include <linux/pid.h> #include <linux/pid_namespace.h> #include <linux/syscalls.h> #include <linux/err.h> #include <linux/acct.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/reboot.h> #define BITS_PER_PAGE (PAGE_SIZE*8) struct pid_cache { int nr_ids; char name[16]; struct kmem_cache *cachep; struct list_head list; }; static LIST_HEAD(pid_caches_lh); static DEFINE_MUTEX(pid_caches_mutex); static struct kmem_cache *pid_ns_cachep; /* * creates the kmem cache to allocate pids from. * @nr_ids: the number of numerical ids this pid will have to carry */ static struct kmem_cache *create_pid_cachep(int nr_ids) { struct pid_cache *pcache; struct kmem_cache *cachep; mutex_lock(&pid_caches_mutex); list_for_each_entry(pcache, &pid_caches_lh, list) if (pcache->nr_ids == nr_ids) goto out; pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL); if (pcache == NULL) goto err_alloc; snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids); cachep = kmem_cache_create(pcache->name, sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid), 0, SLAB_HWCACHE_ALIGN, NULL); if (cachep == NULL) goto err_cachep; pcache->nr_ids = nr_ids; pcache->cachep = cachep; list_add(&pcache->list, &pid_caches_lh); out: mutex_unlock(&pid_caches_mutex); return pcache->cachep; err_cachep: kfree(pcache); err_alloc: mutex_unlock(&pid_caches_mutex); return NULL; } static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns) { struct pid_namespace *ns; unsigned int level = parent_pid_ns->level + 1; int i, err = -ENOMEM; ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL); if (ns == NULL) goto out; ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!ns->pidmap[0].page) goto out_free; ns->pid_cachep = create_pid_cachep(level + 1); if (ns->pid_cachep == NULL) goto out_free_map; kref_init(&ns->kref); ns->level = level; ns->parent = get_pid_ns(parent_pid_ns); set_bit(0, ns->pidmap[0].page); atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); for (i = 1; i < PIDMAP_ENTRIES; i++) atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE); err = pid_ns_prepare_proc(ns); if (err) goto out_put_parent_pid_ns; return ns; out_put_parent_pid_ns: put_pid_ns(parent_pid_ns); out_free_map: kfree(ns->pidmap[0].page); out_free: kmem_cache_free(pid_ns_cachep, ns); out: return ERR_PTR(err); } static void destroy_pid_namespace(struct pid_namespace *ns) { int i; for (i = 0; i < PIDMAP_ENTRIES; i++) kfree(ns->pidmap[i].page); kmem_cache_free(pid_ns_cachep, ns); } struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns) { if (!(flags & CLONE_NEWPID)) return get_pid_ns(old_ns); if (flags & (CLONE_THREAD|CLONE_PARENT)) return ERR_PTR(-EINVAL); return create_pid_namespace(old_ns); } void free_pid_ns(struct kref *kref) { struct pid_namespace *ns, *parent; ns = container_of(kref, struct pid_namespace, kref); parent = ns->parent; destroy_pid_namespace(ns); if (parent != NULL) put_pid_ns(parent); } void zap_pid_ns_processes(struct pid_namespace *pid_ns) { int nr; int rc; struct task_struct *task; /* * The last thread in the cgroup-init thread group is terminating. * Find remaining pid_ts in the namespace, signal and wait for them * to exit. 
* * Note: This signals each threads in the namespace - even those that * belong to the same thread group, To avoid this, we would have * to walk the entire tasklist looking a processes in this * namespace, but that could be unnecessarily expensive if the * pid namespace has just a few processes. Or we need to * maintain a tasklist for each pid namespace. * */ read_lock(&tasklist_lock); nr = next_pidmap(pid_ns, 1); while (nr > 0) { rcu_read_lock(); task = pid_task(find_vpid(nr), PIDTYPE_PID); if (task && !__fatal_signal_pending(task)) send_sig_info(SIGKILL, SEND_SIG_FORCED, task); rcu_read_unlock(); nr = next_pidmap(pid_ns, nr); } read_unlock(&tasklist_lock); do { clear_thread_flag(TIF_SIGPENDING); rc = sys_wait4(-1, NULL, __WALL, NULL); } while (rc != -ECHILD); if (pid_ns->reboot) current->signal->group_exit_code = pid_ns->reboot; acct_exit_ns(pid_ns); return; } static int pid_ns_ctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table tmp = *table; if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; /* * Writing directly to ns' last_pid field is OK, since this field * is volatile in a living namespace anyway and a code writing to * it should synchronize its usage with external means. */ tmp.data = &current->nsproxy->pid_ns->last_pid; return proc_dointvec(&tmp, write, buffer, lenp, ppos); } static struct ctl_table pid_ns_ctl_table[] = { { .procname = "ns_last_pid", .maxlen = sizeof(int), .mode = 0666, /* permissions are checked in the handler */ .proc_handler = pid_ns_ctl_handler, }, { } }; static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } }; int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) { if (pid_ns == &init_pid_ns) return 0; switch (cmd) { case LINUX_REBOOT_CMD_RESTART2: case LINUX_REBOOT_CMD_RESTART: pid_ns->reboot = SIGHUP; break; case LINUX_REBOOT_CMD_POWER_OFF: case LINUX_REBOOT_CMD_HALT: pid_ns->reboot = SIGINT; break; default: return -EINVAL; } read_lock(&tasklist_lock); force_sig(SIGKILL, pid_ns->child_reaper); read_unlock(&tasklist_lock); do_exit(0); /* Not reached */ return 0; } static __init int pid_namespaces_init(void) { pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC); register_sysctl_paths(kern_path, pid_ns_ctl_table); return 0; } __initcall(pid_namespaces_init);
gpl-2.0
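pid_ns_ctl_handler() above backs the kernel.ns_last_pid sysctl (mode 0666, with the permission check done inside the handler). Writing a value sets the namespace's last allocated PID, so the next fork() will normally receive the following number, provided nothing else forks in between; that is exactly the "synchronize its usage with external means" caveat in the comment, and it is how checkpoint/restore tools steer PID allocation. An illustrative userspace use follows; the target PID is arbitrary and the program assumes a kernel that exposes this sysctl and permits the write.

/* Illustrative use of /proc/sys/kernel/ns_last_pid: request the next PID.
 * Nothing else may fork between the write and our fork(), as the kernel
 * comment warns; the chosen PID below is purely for demonstration. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int want = 12345;                       /* hypothetical target PID */
        FILE *f = fopen("/proc/sys/kernel/ns_last_pid", "w");
        pid_t pid;

        if (!f || fprintf(f, "%d", want - 1) < 0 || fclose(f) == EOF) {
                perror("ns_last_pid");
                return 1;
        }

        pid = fork();
        if (pid == 0)
                _exit(0);
        printf("child pid %d (wanted %d)\n", (int)pid, want);
        waitpid(pid, NULL, 0);
        return 0;
}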
TeamFreedom/mecha-2.6.35-gb-mr
drivers/leds/leds-locomo.c
4087
2344
/* * linux/drivers/leds/leds-locomo.c * * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/leds.h> #include <mach/hardware.h> #include <asm/hardware/locomo.h> static void locomoled_brightness_set(struct led_classdev *led_cdev, enum led_brightness value, int offset) { struct locomo_dev *locomo_dev = LOCOMO_DEV(led_cdev->dev->parent); unsigned long flags; local_irq_save(flags); if (value) locomo_writel(LOCOMO_LPT_TOFH, locomo_dev->mapbase + offset); else locomo_writel(LOCOMO_LPT_TOFL, locomo_dev->mapbase + offset); local_irq_restore(flags); } static void locomoled_brightness_set0(struct led_classdev *led_cdev, enum led_brightness value) { locomoled_brightness_set(led_cdev, value, LOCOMO_LPT0); } static void locomoled_brightness_set1(struct led_classdev *led_cdev, enum led_brightness value) { locomoled_brightness_set(led_cdev, value, LOCOMO_LPT1); } static struct led_classdev locomo_led0 = { .name = "locomo:amber:charge", .default_trigger = "main-battery-charging", .brightness_set = locomoled_brightness_set0, }; static struct led_classdev locomo_led1 = { .name = "locomo:green:mail", .default_trigger = "nand-disk", .brightness_set = locomoled_brightness_set1, }; static int locomoled_probe(struct locomo_dev *ldev) { int ret; ret = led_classdev_register(&ldev->dev, &locomo_led0); if (ret < 0) return ret; ret = led_classdev_register(&ldev->dev, &locomo_led1); if (ret < 0) led_classdev_unregister(&locomo_led0); return ret; } static int locomoled_remove(struct locomo_dev *dev) { led_classdev_unregister(&locomo_led0); led_classdev_unregister(&locomo_led1); return 0; } static struct locomo_driver locomoled_driver = { .drv = { .name = "locomoled" }, .devid = LOCOMO_DEVID_LED, .probe = locomoled_probe, .remove = locomoled_remove, }; static int __init locomoled_init(void) { return locomo_driver_register(&locomoled_driver); } module_init(locomoled_init); MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>"); MODULE_DESCRIPTION("Locomo LED driver"); MODULE_LICENSE("GPL");
gpl-2.0
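Once locomoled_probe() registers the two led_classdev structures above, the LED class core exposes them under /sys/class/leds/ using the .name fields, and a write to the brightness attribute ends up in the corresponding brightness_set callback (any non-zero value selects LOCOMO_LPT_TOFH, i.e. "on"). A minimal userspace toggle, assuming the driver is bound and the default sysfs layout:

/* Illustrative userspace toggle of an LED registered by the driver above.
 * The path follows from the .name field "locomo:amber:charge". */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/class/leds/locomo:amber:charge/brightness";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        /* Any non-zero value reaches locomoled_brightness_set0() as "on". */
        fprintf(f, "1\n");
        fclose(f);
        return 0;
}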
Eliminater74/android_kernel_pantech_msm8974
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
4855
68988
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. * All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/sched.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/mlx4/cmd.h> #include <linux/mlx4/qp.h> #include "mlx4.h" #include "fw.h" #define MLX4_MAC_VALID (1ull << 63) #define MLX4_MAC_MASK 0x7fffffffffffffffULL #define ETH_ALEN 6 struct mac_res { struct list_head list; u64 mac; u8 port; }; struct res_common { struct list_head list; u32 res_id; int owner; int state; int from_state; int to_state; int removing; }; enum { RES_ANY_BUSY = 1 }; struct res_gid { struct list_head list; u8 gid[16]; enum mlx4_protocol prot; enum mlx4_steer_type steer; }; enum res_qp_states { RES_QP_BUSY = RES_ANY_BUSY, /* QP number was allocated */ RES_QP_RESERVED, /* ICM memory for QP context was mapped */ RES_QP_MAPPED, /* QP is in hw ownership */ RES_QP_HW }; static inline const char *qp_states_str(enum res_qp_states state) { switch (state) { case RES_QP_BUSY: return "RES_QP_BUSY"; case RES_QP_RESERVED: return "RES_QP_RESERVED"; case RES_QP_MAPPED: return "RES_QP_MAPPED"; case RES_QP_HW: return "RES_QP_HW"; default: return "Unknown"; } } struct res_qp { struct res_common com; struct res_mtt *mtt; struct res_cq *rcq; struct res_cq *scq; struct res_srq *srq; struct list_head mcg_list; spinlock_t mcg_spl; int local_qpn; }; enum res_mtt_states { RES_MTT_BUSY = RES_ANY_BUSY, RES_MTT_ALLOCATED, }; static inline const char *mtt_states_str(enum res_mtt_states state) { switch (state) { case RES_MTT_BUSY: return "RES_MTT_BUSY"; case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED"; default: return "Unknown"; } } struct res_mtt { struct res_common com; int order; atomic_t ref_count; }; enum res_mpt_states { RES_MPT_BUSY = RES_ANY_BUSY, RES_MPT_RESERVED, RES_MPT_MAPPED, RES_MPT_HW, }; struct res_mpt { struct res_common com; struct res_mtt *mtt; int key; }; enum res_eq_states { RES_EQ_BUSY = RES_ANY_BUSY, RES_EQ_RESERVED, RES_EQ_HW, }; struct res_eq { struct res_common com; struct res_mtt *mtt; }; enum res_cq_states { RES_CQ_BUSY = RES_ANY_BUSY, 
RES_CQ_ALLOCATED, RES_CQ_HW, }; struct res_cq { struct res_common com; struct res_mtt *mtt; atomic_t ref_count; }; enum res_srq_states { RES_SRQ_BUSY = RES_ANY_BUSY, RES_SRQ_ALLOCATED, RES_SRQ_HW, }; static inline const char *srq_states_str(enum res_srq_states state) { switch (state) { case RES_SRQ_BUSY: return "RES_SRQ_BUSY"; case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED"; case RES_SRQ_HW: return "RES_SRQ_HW"; default: return "Unknown"; } } struct res_srq { struct res_common com; struct res_mtt *mtt; struct res_cq *cq; atomic_t ref_count; }; enum res_counter_states { RES_COUNTER_BUSY = RES_ANY_BUSY, RES_COUNTER_ALLOCATED, }; static inline const char *counter_states_str(enum res_counter_states state) { switch (state) { case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY"; case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED"; default: return "Unknown"; } } struct res_counter { struct res_common com; int port; }; /* For Debug uses */ static const char *ResourceType(enum mlx4_resource rt) { switch (rt) { case RES_QP: return "RES_QP"; case RES_CQ: return "RES_CQ"; case RES_SRQ: return "RES_SRQ"; case RES_MPT: return "RES_MPT"; case RES_MTT: return "RES_MTT"; case RES_MAC: return "RES_MAC"; case RES_EQ: return "RES_EQ"; case RES_COUNTER: return "RES_COUNTER"; default: return "Unknown resource type !!!"; }; } int mlx4_init_resource_tracker(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; int t; priv->mfunc.master.res_tracker.slave_list = kzalloc(dev->num_slaves * sizeof(struct slave_list), GFP_KERNEL); if (!priv->mfunc.master.res_tracker.slave_list) return -ENOMEM; for (i = 0 ; i < dev->num_slaves; i++) { for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t) INIT_LIST_HEAD(&priv->mfunc.master.res_tracker. slave_list[i].res_list[t]); mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex); } mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", dev->num_slaves); for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i], GFP_ATOMIC|__GFP_NOWARN); spin_lock_init(&priv->mfunc.master.res_tracker.lock); return 0 ; } void mlx4_free_resource_tracker(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); int i; if (priv->mfunc.master.res_tracker.slave_list) { for (i = 0 ; i < dev->num_slaves; i++) mlx4_delete_all_resources_for_slave(dev, i); kfree(priv->mfunc.master.res_tracker.slave_list); } } static void update_ud_gid(struct mlx4_dev *dev, struct mlx4_qp_context *qp_ctx, u8 slave) { u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; if (MLX4_QP_ST_UD == ts) qp_ctx->pri_path.mgid_index = 0x80 | slave; mlx4_dbg(dev, "slave %d, new gid index: 0x%x ", slave, qp_ctx->pri_path.mgid_index); } static int mpt_mask(struct mlx4_dev *dev) { return dev->caps.num_mpts - 1; } static void *find_res(struct mlx4_dev *dev, int res_id, enum mlx4_resource type) { struct mlx4_priv *priv = mlx4_priv(dev); return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type], res_id); } static int get_res(struct mlx4_dev *dev, int slave, int res_id, enum mlx4_resource type, void *res) { struct res_common *r; int err = 0; spin_lock_irq(mlx4_tlock(dev)); r = find_res(dev, res_id, type); if (!r) { err = -ENONET; goto exit; } if (r->state == RES_ANY_BUSY) { err = -EBUSY; goto exit; } if (r->owner != slave) { err = -EPERM; goto exit; } r->from_state = r->state; r->state = RES_ANY_BUSY; mlx4_dbg(dev, "res %s id 0x%x to busy\n", ResourceType(type), r->res_id); if (res) *((struct res_common **)res) = r; exit: 
spin_unlock_irq(mlx4_tlock(dev)); return err; } int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, enum mlx4_resource type, int res_id, int *slave) { struct res_common *r; int err = -ENOENT; int id = res_id; if (type == RES_QP) id &= 0x7fffff; spin_lock(mlx4_tlock(dev)); r = find_res(dev, id, type); if (r) { *slave = r->owner; err = 0; } spin_unlock(mlx4_tlock(dev)); return err; } static void put_res(struct mlx4_dev *dev, int slave, int res_id, enum mlx4_resource type) { struct res_common *r; spin_lock_irq(mlx4_tlock(dev)); r = find_res(dev, res_id, type); if (r) r->state = r->from_state; spin_unlock_irq(mlx4_tlock(dev)); } static struct res_common *alloc_qp_tr(int id) { struct res_qp *ret; ret = kzalloc(sizeof *ret, GFP_KERNEL); if (!ret) return NULL; ret->com.res_id = id; ret->com.state = RES_QP_RESERVED; ret->local_qpn = id; INIT_LIST_HEAD(&ret->mcg_list); spin_lock_init(&ret->mcg_spl); return &ret->com; } static struct res_common *alloc_mtt_tr(int id, int order) { struct res_mtt *ret; ret = kzalloc(sizeof *ret, GFP_KERNEL); if (!ret) return NULL; ret->com.res_id = id; ret->order = order; ret->com.state = RES_MTT_ALLOCATED; atomic_set(&ret->ref_count, 0); return &ret->com; } static struct res_common *alloc_mpt_tr(int id, int key) { struct res_mpt *ret; ret = kzalloc(sizeof *ret, GFP_KERNEL); if (!ret) return NULL; ret->com.res_id = id; ret->com.state = RES_MPT_RESERVED; ret->key = key; return &ret->com; } static struct res_common *alloc_eq_tr(int id) { struct res_eq *ret; ret = kzalloc(sizeof *ret, GFP_KERNEL); if (!ret) return NULL; ret->com.res_id = id; ret->com.state = RES_EQ_RESERVED; return &ret->com; } static struct res_common *alloc_cq_tr(int id) { struct res_cq *ret; ret = kzalloc(sizeof *ret, GFP_KERNEL); if (!ret) return NULL; ret->com.res_id = id; ret->com.state = RES_CQ_ALLOCATED; atomic_set(&ret->ref_count, 0); return &ret->com; } static struct res_common *alloc_srq_tr(int id) { struct res_srq *ret; ret = kzalloc(sizeof *ret, GFP_KERNEL); if (!ret) return NULL; ret->com.res_id = id; ret->com.state = RES_SRQ_ALLOCATED; atomic_set(&ret->ref_count, 0); return &ret->com; } static struct res_common *alloc_counter_tr(int id) { struct res_counter *ret; ret = kzalloc(sizeof *ret, GFP_KERNEL); if (!ret) return NULL; ret->com.res_id = id; ret->com.state = RES_COUNTER_ALLOCATED; return &ret->com; } static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, int extra) { struct res_common *ret; switch (type) { case RES_QP: ret = alloc_qp_tr(id); break; case RES_MPT: ret = alloc_mpt_tr(id, extra); break; case RES_MTT: ret = alloc_mtt_tr(id, extra); break; case RES_EQ: ret = alloc_eq_tr(id); break; case RES_CQ: ret = alloc_cq_tr(id); break; case RES_SRQ: ret = alloc_srq_tr(id); break; case RES_MAC: printk(KERN_ERR "implementation missing\n"); return NULL; case RES_COUNTER: ret = alloc_counter_tr(id); break; default: return NULL; } if (ret) ret->owner = slave; return ret; } static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count, enum mlx4_resource type, int extra) { int i; int err; struct mlx4_priv *priv = mlx4_priv(dev); struct res_common **res_arr; struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct radix_tree_root *root = &tracker->res_tree[type]; res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); if (!res_arr) return -ENOMEM; for (i = 0; i < count; ++i) { res_arr[i] = alloc_tr(base + i, type, slave, extra); if (!res_arr[i]) { for (--i; i >= 0; --i) kfree(res_arr[i]); kfree(res_arr); return -ENOMEM; } } 
spin_lock_irq(mlx4_tlock(dev)); for (i = 0; i < count; ++i) { if (find_res(dev, base + i, type)) { err = -EEXIST; goto undo; } err = radix_tree_insert(root, base + i, res_arr[i]); if (err) goto undo; list_add_tail(&res_arr[i]->list, &tracker->slave_list[slave].res_list[type]); } spin_unlock_irq(mlx4_tlock(dev)); kfree(res_arr); return 0; undo: for (--i; i >= base; --i) radix_tree_delete(&tracker->res_tree[type], i); spin_unlock_irq(mlx4_tlock(dev)); for (i = 0; i < count; ++i) kfree(res_arr[i]); kfree(res_arr); return err; } static int remove_qp_ok(struct res_qp *res) { if (res->com.state == RES_QP_BUSY) return -EBUSY; else if (res->com.state != RES_QP_RESERVED) return -EPERM; return 0; } static int remove_mtt_ok(struct res_mtt *res, int order) { if (res->com.state == RES_MTT_BUSY || atomic_read(&res->ref_count)) { printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n", __func__, __LINE__, mtt_states_str(res->com.state), atomic_read(&res->ref_count)); return -EBUSY; } else if (res->com.state != RES_MTT_ALLOCATED) return -EPERM; else if (res->order != order) return -EINVAL; return 0; } static int remove_mpt_ok(struct res_mpt *res) { if (res->com.state == RES_MPT_BUSY) return -EBUSY; else if (res->com.state != RES_MPT_RESERVED) return -EPERM; return 0; } static int remove_eq_ok(struct res_eq *res) { if (res->com.state == RES_MPT_BUSY) return -EBUSY; else if (res->com.state != RES_MPT_RESERVED) return -EPERM; return 0; } static int remove_counter_ok(struct res_counter *res) { if (res->com.state == RES_COUNTER_BUSY) return -EBUSY; else if (res->com.state != RES_COUNTER_ALLOCATED) return -EPERM; return 0; } static int remove_cq_ok(struct res_cq *res) { if (res->com.state == RES_CQ_BUSY) return -EBUSY; else if (res->com.state != RES_CQ_ALLOCATED) return -EPERM; return 0; } static int remove_srq_ok(struct res_srq *res) { if (res->com.state == RES_SRQ_BUSY) return -EBUSY; else if (res->com.state != RES_SRQ_ALLOCATED) return -EPERM; return 0; } static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) { switch (type) { case RES_QP: return remove_qp_ok((struct res_qp *)res); case RES_CQ: return remove_cq_ok((struct res_cq *)res); case RES_SRQ: return remove_srq_ok((struct res_srq *)res); case RES_MPT: return remove_mpt_ok((struct res_mpt *)res); case RES_MTT: return remove_mtt_ok((struct res_mtt *)res, extra); case RES_MAC: return -ENOSYS; case RES_EQ: return remove_eq_ok((struct res_eq *)res); case RES_COUNTER: return remove_counter_ok((struct res_counter *)res); default: return -EINVAL; } } static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count, enum mlx4_resource type, int extra) { int i; int err; struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct res_common *r; spin_lock_irq(mlx4_tlock(dev)); for (i = base; i < base + count; ++i) { r = radix_tree_lookup(&tracker->res_tree[type], i); if (!r) { err = -ENOENT; goto out; } if (r->owner != slave) { err = -EPERM; goto out; } err = remove_ok(r, type, extra); if (err) goto out; } for (i = base; i < base + count; ++i) { r = radix_tree_lookup(&tracker->res_tree[type], i); radix_tree_delete(&tracker->res_tree[type], i); list_del(&r->list); kfree(r); } err = 0; out: spin_unlock_irq(mlx4_tlock(dev)); return err; } static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, enum res_qp_states state, struct res_qp **qp, int alloc) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = 
&priv->mfunc.master.res_tracker; struct res_qp *r; int err = 0; spin_lock_irq(mlx4_tlock(dev)); r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn); if (!r) err = -ENOENT; else if (r->com.owner != slave) err = -EPERM; else { switch (state) { case RES_QP_BUSY: mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n", __func__, r->com.res_id); err = -EBUSY; break; case RES_QP_RESERVED: if (r->com.state == RES_QP_MAPPED && !alloc) break; mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id); err = -EINVAL; break; case RES_QP_MAPPED: if ((r->com.state == RES_QP_RESERVED && alloc) || r->com.state == RES_QP_HW) break; else { mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id); err = -EINVAL; } break; case RES_QP_HW: if (r->com.state != RES_QP_MAPPED) err = -EINVAL; break; default: err = -EINVAL; } if (!err) { r->com.from_state = r->com.state; r->com.to_state = state; r->com.state = RES_QP_BUSY; if (qp) *qp = (struct res_qp *)r; } } spin_unlock_irq(mlx4_tlock(dev)); return err; } static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, enum res_mpt_states state, struct res_mpt **mpt) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct res_mpt *r; int err = 0; spin_lock_irq(mlx4_tlock(dev)); r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index); if (!r) err = -ENOENT; else if (r->com.owner != slave) err = -EPERM; else { switch (state) { case RES_MPT_BUSY: err = -EINVAL; break; case RES_MPT_RESERVED: if (r->com.state != RES_MPT_MAPPED) err = -EINVAL; break; case RES_MPT_MAPPED: if (r->com.state != RES_MPT_RESERVED && r->com.state != RES_MPT_HW) err = -EINVAL; break; case RES_MPT_HW: if (r->com.state != RES_MPT_MAPPED) err = -EINVAL; break; default: err = -EINVAL; } if (!err) { r->com.from_state = r->com.state; r->com.to_state = state; r->com.state = RES_MPT_BUSY; if (mpt) *mpt = (struct res_mpt *)r; } } spin_unlock_irq(mlx4_tlock(dev)); return err; } static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, enum res_eq_states state, struct res_eq **eq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct res_eq *r; int err = 0; spin_lock_irq(mlx4_tlock(dev)); r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index); if (!r) err = -ENOENT; else if (r->com.owner != slave) err = -EPERM; else { switch (state) { case RES_EQ_BUSY: err = -EINVAL; break; case RES_EQ_RESERVED: if (r->com.state != RES_EQ_HW) err = -EINVAL; break; case RES_EQ_HW: if (r->com.state != RES_EQ_RESERVED) err = -EINVAL; break; default: err = -EINVAL; } if (!err) { r->com.from_state = r->com.state; r->com.to_state = state; r->com.state = RES_EQ_BUSY; if (eq) *eq = r; } } spin_unlock_irq(mlx4_tlock(dev)); return err; } static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, enum res_cq_states state, struct res_cq **cq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct res_cq *r; int err; spin_lock_irq(mlx4_tlock(dev)); r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn); if (!r) err = -ENOENT; else if (r->com.owner != slave) err = -EPERM; else { switch (state) { case RES_CQ_BUSY: err = -EBUSY; break; case RES_CQ_ALLOCATED: if (r->com.state != RES_CQ_HW) err = -EINVAL; else if (atomic_read(&r->ref_count)) err = -EBUSY; else err = 0; break; case RES_CQ_HW: if (r->com.state != RES_CQ_ALLOCATED) err = -EINVAL; else err = 0; break; default: err = -EINVAL; } if (!err) { 
r->com.from_state = r->com.state; r->com.to_state = state; r->com.state = RES_CQ_BUSY; if (cq) *cq = r; } } spin_unlock_irq(mlx4_tlock(dev)); return err; } static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, enum res_cq_states state, struct res_srq **srq) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct res_srq *r; int err = 0; spin_lock_irq(mlx4_tlock(dev)); r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index); if (!r) err = -ENOENT; else if (r->com.owner != slave) err = -EPERM; else { switch (state) { case RES_SRQ_BUSY: err = -EINVAL; break; case RES_SRQ_ALLOCATED: if (r->com.state != RES_SRQ_HW) err = -EINVAL; else if (atomic_read(&r->ref_count)) err = -EBUSY; break; case RES_SRQ_HW: if (r->com.state != RES_SRQ_ALLOCATED) err = -EINVAL; break; default: err = -EINVAL; } if (!err) { r->com.from_state = r->com.state; r->com.to_state = state; r->com.state = RES_SRQ_BUSY; if (srq) *srq = r; } } spin_unlock_irq(mlx4_tlock(dev)); return err; } static void res_abort_move(struct mlx4_dev *dev, int slave, enum mlx4_resource type, int id) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct res_common *r; spin_lock_irq(mlx4_tlock(dev)); r = radix_tree_lookup(&tracker->res_tree[type], id); if (r && (r->owner == slave)) r->state = r->from_state; spin_unlock_irq(mlx4_tlock(dev)); } static void res_end_move(struct mlx4_dev *dev, int slave, enum mlx4_resource type, int id) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct res_common *r; spin_lock_irq(mlx4_tlock(dev)); r = radix_tree_lookup(&tracker->res_tree[type], id); if (r && (r->owner == slave)) r->state = r->to_state; spin_unlock_irq(mlx4_tlock(dev)); } static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) { return mlx4_is_qp_reserved(dev, qpn); } static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { int err; int count; int align; int base; int qpn; switch (op) { case RES_OP_RESERVE: count = get_param_l(&in_param); align = get_param_h(&in_param); err = __mlx4_qp_reserve_range(dev, count, align, &base); if (err) return err; err = add_res_range(dev, slave, base, count, RES_QP, 0); if (err) { __mlx4_qp_release_range(dev, base, count); return err; } set_param_l(out_param, base); break; case RES_OP_MAP_ICM: qpn = get_param_l(&in_param) & 0x7fffff; if (valid_reserved(dev, slave, qpn)) { err = add_res_range(dev, slave, qpn, 1, RES_QP, 0); if (err) return err; } err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, NULL, 1); if (err) return err; if (!valid_reserved(dev, slave, qpn)) { err = __mlx4_qp_alloc_icm(dev, qpn); if (err) { res_abort_move(dev, slave, RES_QP, qpn); return err; } } res_end_move(dev, slave, RES_QP, qpn); break; default: err = -EINVAL; break; } return err; } static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { int err = -EINVAL; int base; int order; if (op != RES_OP_RESERVE_AND_MAP) return err; order = get_param_l(&in_param); base = __mlx4_alloc_mtt_range(dev, order); if (base == -1) return -ENOMEM; err = add_res_range(dev, slave, base, 1, RES_MTT, order); if (err) __mlx4_free_mtt_range(dev, base, order); else set_param_l(out_param, base); return err; } static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { int err = 
-EINVAL; int index; int id; struct res_mpt *mpt; switch (op) { case RES_OP_RESERVE: index = __mlx4_mr_reserve(dev); if (index == -1) break; id = index & mpt_mask(dev); err = add_res_range(dev, slave, id, 1, RES_MPT, index); if (err) { __mlx4_mr_release(dev, index); break; } set_param_l(out_param, index); break; case RES_OP_MAP_ICM: index = get_param_l(&in_param); id = index & mpt_mask(dev); err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); if (err) return err; err = __mlx4_mr_alloc_icm(dev, mpt->key); if (err) { res_abort_move(dev, slave, RES_MPT, id); return err; } res_end_move(dev, slave, RES_MPT, id); break; } return err; } static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { int cqn; int err; switch (op) { case RES_OP_RESERVE_AND_MAP: err = __mlx4_cq_alloc_icm(dev, &cqn); if (err) break; err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); if (err) { __mlx4_cq_free_icm(dev, cqn); break; } set_param_l(out_param, cqn); break; default: err = -EINVAL; } return err; } static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { int srqn; int err; switch (op) { case RES_OP_RESERVE_AND_MAP: err = __mlx4_srq_alloc_icm(dev, &srqn); if (err) break; err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); if (err) { __mlx4_srq_free_icm(dev, srqn); break; } set_param_l(out_param, srqn); break; default: err = -EINVAL; } return err; } static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct mac_res *res; res = kzalloc(sizeof *res, GFP_KERNEL); if (!res) return -ENOMEM; res->mac = mac; res->port = (u8) port; list_add_tail(&res->list, &tracker->slave_list[slave].res_list[RES_MAC]); return 0; } static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct list_head *mac_list = &tracker->slave_list[slave].res_list[RES_MAC]; struct mac_res *res, *tmp; list_for_each_entry_safe(res, tmp, mac_list, list) { if (res->mac == mac && res->port == (u8) port) { list_del(&res->list); kfree(res); break; } } } static void rem_slave_macs(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct list_head *mac_list = &tracker->slave_list[slave].res_list[RES_MAC]; struct mac_res *res, *tmp; list_for_each_entry_safe(res, tmp, mac_list, list) { list_del(&res->list); __mlx4_unregister_mac(dev, res->port, res->mac); kfree(res); } } static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { int err = -EINVAL; int port; u64 mac; if (op != RES_OP_RESERVE_AND_MAP) return err; port = get_param_l(out_param); mac = in_param; err = __mlx4_register_mac(dev, port, mac); if (err >= 0) { set_param_l(out_param, err); err = 0; } if (!err) { err = mac_add_to_slave(dev, slave, mac, port); if (err) __mlx4_unregister_mac(dev, port, mac); } return err; } static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { return 0; } int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int alop = vhcr->op_modifier; switch 
(vhcr->in_modifier) { case RES_QP: err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_MTT: err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_MPT: err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_CQ: err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_SRQ: err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_MAC: err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_VLAN: err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; default: err = -EINVAL; break; } return err; } static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param) { int err; int count; int base; int qpn; switch (op) { case RES_OP_RESERVE: base = get_param_l(&in_param) & 0x7fffff; count = get_param_h(&in_param); err = rem_res_range(dev, slave, base, count, RES_QP, 0); if (err) break; __mlx4_qp_release_range(dev, base, count); break; case RES_OP_MAP_ICM: qpn = get_param_l(&in_param) & 0x7fffff; err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED, NULL, 0); if (err) return err; if (!valid_reserved(dev, slave, qpn)) __mlx4_qp_free_icm(dev, qpn); res_end_move(dev, slave, RES_QP, qpn); if (valid_reserved(dev, slave, qpn)) err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0); break; default: err = -EINVAL; break; } return err; } static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { int err = -EINVAL; int base; int order; if (op != RES_OP_RESERVE_AND_MAP) return err; base = get_param_l(&in_param); order = get_param_h(&in_param); err = rem_res_range(dev, slave, base, 1, RES_MTT, order); if (!err) __mlx4_free_mtt_range(dev, base, order); return err; } static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param) { int err = -EINVAL; int index; int id; struct res_mpt *mpt; switch (op) { case RES_OP_RESERVE: index = get_param_l(&in_param); id = index & mpt_mask(dev); err = get_res(dev, slave, id, RES_MPT, &mpt); if (err) break; index = mpt->key; put_res(dev, slave, id, RES_MPT); err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); if (err) break; __mlx4_mr_release(dev, index); break; case RES_OP_MAP_ICM: index = get_param_l(&in_param); id = index & mpt_mask(dev); err = mr_res_start_move_to(dev, slave, id, RES_MPT_RESERVED, &mpt); if (err) return err; __mlx4_mr_free_icm(dev, mpt->key); res_end_move(dev, slave, RES_MPT, id); return err; break; default: err = -EINVAL; break; } return err; } static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { int cqn; int err; switch (op) { case RES_OP_RESERVE_AND_MAP: cqn = get_param_l(&in_param); err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0); if (err) break; __mlx4_cq_free_icm(dev, cqn); break; default: err = -EINVAL; break; } return err; } static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { int srqn; int err; switch (op) { case RES_OP_RESERVE_AND_MAP: srqn = get_param_l(&in_param); err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0); if (err) break; __mlx4_srq_free_icm(dev, srqn); break; default: err = -EINVAL; break; } return err; } static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 
in_param, u64 *out_param) { int port; int err = 0; switch (op) { case RES_OP_RESERVE_AND_MAP: port = get_param_l(out_param); mac_del_from_slave(dev, slave, in_param, port); __mlx4_unregister_mac(dev, port, in_param); break; default: err = -EINVAL; break; } return err; } static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, u64 in_param, u64 *out_param) { return 0; } int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err = -EINVAL; int alop = vhcr->op_modifier; switch (vhcr->in_modifier) { case RES_QP: err = qp_free_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param); break; case RES_MTT: err = mtt_free_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_MPT: err = mpt_free_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param); break; case RES_CQ: err = cq_free_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_SRQ: err = srq_free_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_MAC: err = mac_free_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; case RES_VLAN: err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, vhcr->in_param, &vhcr->out_param); break; default: break; } return err; } /* ugly but other choices are uglier */ static int mr_phys_mpt(struct mlx4_mpt_entry *mpt) { return (be32_to_cpu(mpt->flags) >> 9) & 1; } static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt) { return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8; } static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt) { return be32_to_cpu(mpt->mtt_sz); } static int qp_get_mtt_addr(struct mlx4_qp_context *qpc) { return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8; } static int srq_get_mtt_addr(struct mlx4_srq_context *srqc) { return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8; } static int qp_get_mtt_size(struct mlx4_qp_context *qpc) { int page_shift = (qpc->log_page_size & 0x3f) + 12; int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf; int log_sq_sride = qpc->sq_size_stride & 7; int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf; int log_rq_stride = qpc->rq_size_stride & 7; int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1; int sq_size; int rq_size; int total_pages; int total_mem; int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; sq_size = 1 << (log_sq_size + log_sq_sride + 4); rq_size = (srq|rss|xrc) ? 
0 : (1 << (log_rq_size + log_rq_stride + 4)); total_mem = sq_size + rq_size; total_pages = roundup_pow_of_two((total_mem + (page_offset << 6)) >> page_shift); return total_pages; } static int check_mtt_range(struct mlx4_dev *dev, int slave, int start, int size, struct res_mtt *mtt) { int res_start = mtt->com.res_id; int res_size = (1 << mtt->order); if (start < res_start || start + size > res_start + res_size) return -EPERM; return 0; } int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int index = vhcr->in_modifier; struct res_mtt *mtt; struct res_mpt *mpt; int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz; int phys; int id; id = index & mpt_mask(dev); err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt); if (err) return err; phys = mr_phys_mpt(inbox->buf); if (!phys) { err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); if (err) goto ex_abort; err = check_mtt_range(dev, slave, mtt_base, mr_get_mtt_size(inbox->buf), mtt); if (err) goto ex_put; mpt->mtt = mtt; } err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_put; if (!phys) { atomic_inc(&mtt->ref_count); put_res(dev, slave, mtt->com.res_id, RES_MTT); } res_end_move(dev, slave, RES_MPT, id); return 0; ex_put: if (!phys) put_res(dev, slave, mtt->com.res_id, RES_MTT); ex_abort: res_abort_move(dev, slave, RES_MPT, id); return err; } int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int index = vhcr->in_modifier; struct res_mpt *mpt; int id; id = index & mpt_mask(dev); err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt); if (err) return err; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_abort; if (mpt->mtt) atomic_dec(&mpt->mtt->ref_count); res_end_move(dev, slave, RES_MPT, id); return 0; ex_abort: res_abort_move(dev, slave, RES_MPT, id); return err; } int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int index = vhcr->in_modifier; struct res_mpt *mpt; int id; id = index & mpt_mask(dev); err = get_res(dev, slave, id, RES_MPT, &mpt); if (err) return err; if (mpt->com.from_state != RES_MPT_HW) { err = -EBUSY; goto out; } err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); out: put_res(dev, slave, id, RES_MPT); return err; } static int qp_get_rcqn(struct mlx4_qp_context *qpc) { return be32_to_cpu(qpc->cqn_recv) & 0xffffff; } static int qp_get_scqn(struct mlx4_qp_context *qpc) { return be32_to_cpu(qpc->cqn_send) & 0xffffff; } static u32 qp_get_srqn(struct mlx4_qp_context *qpc) { return be32_to_cpu(qpc->srqn) & 0x1ffffff; } int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int qpn = vhcr->in_modifier & 0x7fffff; struct res_mtt *mtt; struct res_qp *qp; struct mlx4_qp_context *qpc = inbox->buf + 8; int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz; int mtt_size = qp_get_mtt_size(qpc); struct res_cq *rcq; struct res_cq *scq; int rcqn = qp_get_rcqn(qpc); int scqn = qp_get_scqn(qpc); u32 srqn = qp_get_srqn(qpc) & 0xffffff; int use_srq = (qp_get_srqn(qpc) >> 24) & 1; struct res_srq *srq; int 
local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); if (err) return err; qp->local_qpn = local_qpn; err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); if (err) goto ex_abort; err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); if (err) goto ex_put_mtt; err = get_res(dev, slave, rcqn, RES_CQ, &rcq); if (err) goto ex_put_mtt; if (scqn != rcqn) { err = get_res(dev, slave, scqn, RES_CQ, &scq); if (err) goto ex_put_rcq; } else scq = rcq; if (use_srq) { err = get_res(dev, slave, srqn, RES_SRQ, &srq); if (err) goto ex_put_scq; } err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_put_srq; atomic_inc(&mtt->ref_count); qp->mtt = mtt; atomic_inc(&rcq->ref_count); qp->rcq = rcq; atomic_inc(&scq->ref_count); qp->scq = scq; if (scqn != rcqn) put_res(dev, slave, scqn, RES_CQ); if (use_srq) { atomic_inc(&srq->ref_count); put_res(dev, slave, srqn, RES_SRQ); qp->srq = srq; } put_res(dev, slave, rcqn, RES_CQ); put_res(dev, slave, mtt_base, RES_MTT); res_end_move(dev, slave, RES_QP, qpn); return 0; ex_put_srq: if (use_srq) put_res(dev, slave, srqn, RES_SRQ); ex_put_scq: if (scqn != rcqn) put_res(dev, slave, scqn, RES_CQ); ex_put_rcq: put_res(dev, slave, rcqn, RES_CQ); ex_put_mtt: put_res(dev, slave, mtt_base, RES_MTT); ex_abort: res_abort_move(dev, slave, RES_QP, qpn); return err; } static int eq_get_mtt_addr(struct mlx4_eq_context *eqc) { return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8; } static int eq_get_mtt_size(struct mlx4_eq_context *eqc) { int log_eq_size = eqc->log_eq_size & 0x1f; int page_shift = (eqc->log_page_size & 0x3f) + 12; if (log_eq_size + 5 < page_shift) return 1; return 1 << (log_eq_size + 5 - page_shift); } static int cq_get_mtt_addr(struct mlx4_cq_context *cqc) { return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8; } static int cq_get_mtt_size(struct mlx4_cq_context *cqc) { int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f; int page_shift = (cqc->log_page_size & 0x3f) + 12; if (log_cq_size + 5 < page_shift) return 1; return 1 << (log_cq_size + 5 - page_shift); } int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int eqn = vhcr->in_modifier; int res_id = (slave << 8) | eqn; struct mlx4_eq_context *eqc = inbox->buf; int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz; int mtt_size = eq_get_mtt_size(eqc); struct res_eq *eq; struct res_mtt *mtt; err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0); if (err) return err; err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq); if (err) goto out_add; err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); if (err) goto out_move; err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt); if (err) goto out_put; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto out_put; atomic_inc(&mtt->ref_count); eq->mtt = mtt; put_res(dev, slave, mtt->com.res_id, RES_MTT); res_end_move(dev, slave, RES_EQ, res_id); return 0; out_put: put_res(dev, slave, mtt->com.res_id, RES_MTT); out_move: res_abort_move(dev, slave, RES_EQ, res_id); out_add: rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); return err; } static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start, int len, struct res_mtt **res) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct res_mtt *mtt; int err = -EINVAL; 
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT], com.list) { if (!check_mtt_range(dev, slave, start, len, mtt)) { *res = mtt; mtt->com.from_state = mtt->com.state; mtt->com.state = RES_MTT_BUSY; err = 0; break; } } spin_unlock_irq(mlx4_tlock(dev)); return err; } int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { struct mlx4_mtt mtt; __be64 *page_list = inbox->buf; u64 *pg_list = (u64 *)page_list; int i; struct res_mtt *rmtt = NULL; int start = be64_to_cpu(page_list[0]); int npages = vhcr->in_modifier; int err; err = get_containing_mtt(dev, slave, start, npages, &rmtt); if (err) return err; /* Call the SW implementation of write_mtt: * - Prepare a dummy mtt struct * - Translate inbox contents to simple addresses in host endianess */ mtt.offset = 0; /* TBD this is broken but I don't handle it since we don't really use it */ mtt.order = 0; mtt.page_shift = 0; for (i = 0; i < npages; ++i) pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL); err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, ((u64 *)page_list + 2)); if (rmtt) put_res(dev, slave, rmtt->com.res_id, RES_MTT); return err; } int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int eqn = vhcr->in_modifier; int res_id = eqn | (slave << 8); struct res_eq *eq; int err; err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq); if (err) return err; err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL); if (err) goto ex_abort; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_put; atomic_dec(&eq->mtt->ref_count); put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); res_end_move(dev, slave, RES_EQ, res_id); rem_res_range(dev, slave, res_id, 1, RES_EQ, 0); return 0; ex_put: put_res(dev, slave, eq->mtt->com.res_id, RES_MTT); ex_abort: res_abort_move(dev, slave, RES_EQ, res_id); return err; } int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_slave_event_eq_info *event_eq; struct mlx4_cmd_mailbox *mailbox; u32 in_modifier = 0; int err; int res_id; struct res_eq *req; if (!priv->mfunc.master.slave_state) return -EINVAL; event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; /* Create the event only if the slave is registered */ if (event_eq->eqn < 0) return 0; mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]); res_id = (slave << 8) | event_eq->eqn; err = get_res(dev, slave, res_id, RES_EQ, &req); if (err) goto unlock; if (req->com.from_state != RES_EQ_HW) { err = -EINVAL; goto put; } mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { err = PTR_ERR(mailbox); goto put; } if (eqe->type == MLX4_EVENT_TYPE_CMD) { ++event_eq->token; eqe->event.cmd.token = cpu_to_be16(event_eq->token); } memcpy(mailbox->buf, (u8 *) eqe, 28); in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16); err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0, MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); put_res(dev, slave, res_id, RES_EQ); mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); mlx4_free_cmd_mailbox(dev, mailbox); return err; put: put_res(dev, slave, res_id, RES_EQ); unlock: mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]); return err; } int 
mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int eqn = vhcr->in_modifier; int res_id = eqn | (slave << 8); struct res_eq *eq; int err; err = get_res(dev, slave, res_id, RES_EQ, &eq); if (err) return err; if (eq->com.from_state != RES_EQ_HW) { err = -EINVAL; goto ex_put; } err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); ex_put: put_res(dev, slave, res_id, RES_EQ); return err; } int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int cqn = vhcr->in_modifier; struct mlx4_cq_context *cqc = inbox->buf; int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; struct res_cq *cq; struct res_mtt *mtt; err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); if (err) return err; err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); if (err) goto out_move; err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); if (err) goto out_put; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto out_put; atomic_inc(&mtt->ref_count); cq->mtt = mtt; put_res(dev, slave, mtt->com.res_id, RES_MTT); res_end_move(dev, slave, RES_CQ, cqn); return 0; out_put: put_res(dev, slave, mtt->com.res_id, RES_MTT); out_move: res_abort_move(dev, slave, RES_CQ, cqn); return err; } int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int cqn = vhcr->in_modifier; struct res_cq *cq; err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); if (err) return err; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto out_move; atomic_dec(&cq->mtt->ref_count); res_end_move(dev, slave, RES_CQ, cqn); return 0; out_move: res_abort_move(dev, slave, RES_CQ, cqn); return err; } int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int cqn = vhcr->in_modifier; struct res_cq *cq; int err; err = get_res(dev, slave, cqn, RES_CQ, &cq); if (err) return err; if (cq->com.from_state != RES_CQ_HW) goto ex_put; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); ex_put: put_res(dev, slave, cqn, RES_CQ); return err; } static int handle_resize(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd, struct res_cq *cq) { int err; struct res_mtt *orig_mtt; struct res_mtt *mtt; struct mlx4_cq_context *cqc = inbox->buf; int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz; err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); if (err) return err; if (orig_mtt != cq->mtt) { err = -EINVAL; goto ex_put; } err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); if (err) goto ex_put; err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt); if (err) goto ex_put1; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_put1; atomic_dec(&orig_mtt->ref_count); put_res(dev, slave, orig_mtt->com.res_id, RES_MTT); atomic_inc(&mtt->ref_count); cq->mtt = mtt; put_res(dev, slave, mtt->com.res_id, RES_MTT); return 0; ex_put1: put_res(dev, slave, mtt->com.res_id, RES_MTT); ex_put: put_res(dev, slave, orig_mtt->com.res_id, 
RES_MTT); return err; } int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int cqn = vhcr->in_modifier; struct res_cq *cq; int err; err = get_res(dev, slave, cqn, RES_CQ, &cq); if (err) return err; if (cq->com.from_state != RES_CQ_HW) goto ex_put; if (vhcr->op_modifier == 0) { err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); goto ex_put; } err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); ex_put: put_res(dev, slave, cqn, RES_CQ); return err; } static int srq_get_mtt_size(struct mlx4_srq_context *srqc) { int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf; int log_rq_stride = srqc->logstride & 7; int page_shift = (srqc->log_page_size & 0x3f) + 12; if (log_srq_size + log_rq_stride + 4 < page_shift) return 1; return 1 << (log_srq_size + log_rq_stride + 4 - page_shift); } int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int srqn = vhcr->in_modifier; struct res_mtt *mtt; struct res_srq *srq; struct mlx4_srq_context *srqc = inbox->buf; int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz; if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff)) return -EINVAL; err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq); if (err) return err; err = get_res(dev, slave, mtt_base, RES_MTT, &mtt); if (err) goto ex_abort; err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc), mtt); if (err) goto ex_put_mtt; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_put_mtt; atomic_inc(&mtt->ref_count); srq->mtt = mtt; put_res(dev, slave, mtt->com.res_id, RES_MTT); res_end_move(dev, slave, RES_SRQ, srqn); return 0; ex_put_mtt: put_res(dev, slave, mtt->com.res_id, RES_MTT); ex_abort: res_abort_move(dev, slave, RES_SRQ, srqn); return err; } int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int srqn = vhcr->in_modifier; struct res_srq *srq; err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq); if (err) return err; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_abort; atomic_dec(&srq->mtt->ref_count); if (srq->cq) atomic_dec(&srq->cq->ref_count); res_end_move(dev, slave, RES_SRQ, srqn); return 0; ex_abort: res_abort_move(dev, slave, RES_SRQ, srqn); return err; } int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int srqn = vhcr->in_modifier; struct res_srq *srq; err = get_res(dev, slave, srqn, RES_SRQ, &srq); if (err) return err; if (srq->com.from_state != RES_SRQ_HW) { err = -EBUSY; goto out; } err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); out: put_res(dev, slave, srqn, RES_SRQ); return err; } int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int srqn = vhcr->in_modifier; struct res_srq *srq; err = get_res(dev, slave, srqn, RES_SRQ, &srq); if (err) return err; if (srq->com.from_state != RES_SRQ_HW) { err = -EBUSY; goto out; } err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, 
cmd); out: put_res(dev, slave, srqn, RES_SRQ); return err; } int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int qpn = vhcr->in_modifier & 0x7fffff; struct res_qp *qp; err = get_res(dev, slave, qpn, RES_QP, &qp); if (err) return err; if (qp->com.from_state != RES_QP_HW) { err = -EBUSY; goto out; } err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); out: put_res(dev, slave, qpn, RES_QP); return err; } int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { struct mlx4_qp_context *qpc = inbox->buf + 8; update_ud_gid(dev, qpc, (u8)slave); return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); } int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int qpn = vhcr->in_modifier & 0x7fffff; struct res_qp *qp; err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); if (err) return err; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); if (err) goto ex_abort; atomic_dec(&qp->mtt->ref_count); atomic_dec(&qp->rcq->ref_count); atomic_dec(&qp->scq->ref_count); if (qp->srq) atomic_dec(&qp->srq->ref_count); res_end_move(dev, slave, RES_QP, qpn); return 0; ex_abort: res_abort_move(dev, slave, RES_QP, qpn); return err; } static struct res_gid *find_gid(struct mlx4_dev *dev, int slave, struct res_qp *rqp, u8 *gid) { struct res_gid *res; list_for_each_entry(res, &rqp->mcg_list, list) { if (!memcmp(res->gid, gid, 16)) return res; } return NULL; } static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, u8 *gid, enum mlx4_protocol prot, enum mlx4_steer_type steer) { struct res_gid *res; int err; res = kzalloc(sizeof *res, GFP_KERNEL); if (!res) return -ENOMEM; spin_lock_irq(&rqp->mcg_spl); if (find_gid(dev, slave, rqp, gid)) { kfree(res); err = -EEXIST; } else { memcpy(res->gid, gid, 16); res->prot = prot; res->steer = steer; list_add_tail(&res->list, &rqp->mcg_list); err = 0; } spin_unlock_irq(&rqp->mcg_spl); return err; } static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, u8 *gid, enum mlx4_protocol prot, enum mlx4_steer_type steer) { struct res_gid *res; int err; spin_lock_irq(&rqp->mcg_spl); res = find_gid(dev, slave, rqp, gid); if (!res || res->prot != prot || res->steer != steer) err = -EINVAL; else { list_del(&res->list); kfree(res); err = 0; } spin_unlock_irq(&rqp->mcg_spl); return err; } int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { struct mlx4_qp qp; /* dummy for calling attach/detach */ u8 *gid = inbox->buf; enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7; int err, err1; int qpn; struct res_qp *rqp; int attach = vhcr->op_modifier; int block_loopback = vhcr->in_modifier >> 31; u8 steer_type_mask = 2; enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; qpn = vhcr->in_modifier & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) return err; qp.qpn = qpn; if (attach) { err = add_mcg_res(dev, slave, rqp, gid, prot, type); if (err) goto ex_put; err = mlx4_qp_attach_common(dev, &qp, gid, block_loopback, prot, type); if (err) goto ex_rem; } else { err = rem_mcg_res(dev, 
slave, rqp, gid, prot, type); if (err) goto ex_put; err = mlx4_qp_detach_common(dev, &qp, gid, prot, type); } put_res(dev, slave, qpn, RES_QP); return 0; ex_rem: /* ignore error return below, already in error */ err1 = rem_mcg_res(dev, slave, rqp, gid, prot, type); ex_put: put_res(dev, slave, qpn, RES_QP); return err; } enum { BUSY_MAX_RETRIES = 10 }; int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { int err; int index = vhcr->in_modifier & 0xffff; err = get_res(dev, slave, index, RES_COUNTER, NULL); if (err) return err; err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd); put_res(dev, slave, index, RES_COUNTER); return err; } static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp) { struct res_gid *rgid; struct res_gid *tmp; int err; struct mlx4_qp qp; /* dummy for calling attach/detach */ list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) { qp.qpn = rqp->local_qpn; err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot, rgid->steer); list_del(&rgid->list); kfree(rgid); } } static int _move_all_busy(struct mlx4_dev *dev, int slave, enum mlx4_resource type, int print) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct list_head *rlist = &tracker->slave_list[slave].res_list[type]; struct res_common *r; struct res_common *tmp; int busy; busy = 0; spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(r, tmp, rlist, list) { if (r->owner == slave) { if (!r->removing) { if (r->state == RES_ANY_BUSY) { if (print) mlx4_dbg(dev, "%s id 0x%x is busy\n", ResourceType(type), r->res_id); ++busy; } else { r->from_state = r->state; r->state = RES_ANY_BUSY; r->removing = 1; } } } } spin_unlock_irq(mlx4_tlock(dev)); return busy; } static int move_all_busy(struct mlx4_dev *dev, int slave, enum mlx4_resource type) { unsigned long begin; int busy; begin = jiffies; do { busy = _move_all_busy(dev, slave, type, 0); if (time_after(jiffies, begin + 5 * HZ)) break; if (busy) cond_resched(); } while (busy); if (busy) busy = _move_all_busy(dev, slave, type, 1); return busy; } static void rem_slave_qps(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct list_head *qp_list = &tracker->slave_list[slave].res_list[RES_QP]; struct res_qp *qp; struct res_qp *tmp; int state; u64 in_param; int qpn; int err; err = move_all_busy(dev, slave, RES_QP); if (err) mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" "for slave %d\n", slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(qp, tmp, qp_list, com.list) { spin_unlock_irq(mlx4_tlock(dev)); if (qp->com.owner == slave) { qpn = qp->com.res_id; detach_qp(dev, slave, qp); state = qp->com.from_state; while (state != 0) { switch (state) { case RES_QP_RESERVED: spin_lock_irq(mlx4_tlock(dev)); radix_tree_delete(&tracker->res_tree[RES_QP], qp->com.res_id); list_del(&qp->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(qp); state = 0; break; case RES_QP_MAPPED: if (!valid_reserved(dev, slave, qpn)) __mlx4_qp_free_icm(dev, qpn); state = RES_QP_RESERVED; break; case RES_QP_HW: in_param = slave; err = mlx4_cmd(dev, in_param, qp->local_qpn, 2, MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) mlx4_dbg(dev, "rem_slave_qps: failed" " to move slave %d qpn %d to" " reset\n", slave, qp->local_qpn); 
atomic_dec(&qp->rcq->ref_count); atomic_dec(&qp->scq->ref_count); atomic_dec(&qp->mtt->ref_count); if (qp->srq) atomic_dec(&qp->srq->ref_count); state = RES_QP_MAPPED; break; default: state = 0; } } } spin_lock_irq(mlx4_tlock(dev)); } spin_unlock_irq(mlx4_tlock(dev)); } static void rem_slave_srqs(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct list_head *srq_list = &tracker->slave_list[slave].res_list[RES_SRQ]; struct res_srq *srq; struct res_srq *tmp; int state; u64 in_param; LIST_HEAD(tlist); int srqn; int err; err = move_all_busy(dev, slave, RES_SRQ); if (err) mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " "busy for slave %d\n", slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(srq, tmp, srq_list, com.list) { spin_unlock_irq(mlx4_tlock(dev)); if (srq->com.owner == slave) { srqn = srq->com.res_id; state = srq->com.from_state; while (state != 0) { switch (state) { case RES_SRQ_ALLOCATED: __mlx4_srq_free_icm(dev, srqn); spin_lock_irq(mlx4_tlock(dev)); radix_tree_delete(&tracker->res_tree[RES_SRQ], srqn); list_del(&srq->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(srq); state = 0; break; case RES_SRQ_HW: in_param = slave; err = mlx4_cmd(dev, in_param, srqn, 1, MLX4_CMD_HW2SW_SRQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) mlx4_dbg(dev, "rem_slave_srqs: failed" " to move slave %d srq %d to" " SW ownership\n", slave, srqn); atomic_dec(&srq->mtt->ref_count); if (srq->cq) atomic_dec(&srq->cq->ref_count); state = RES_SRQ_ALLOCATED; break; default: state = 0; } } } spin_lock_irq(mlx4_tlock(dev)); } spin_unlock_irq(mlx4_tlock(dev)); } static void rem_slave_cqs(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct list_head *cq_list = &tracker->slave_list[slave].res_list[RES_CQ]; struct res_cq *cq; struct res_cq *tmp; int state; u64 in_param; LIST_HEAD(tlist); int cqn; int err; err = move_all_busy(dev, slave, RES_CQ); if (err) mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " "busy for slave %d\n", slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(cq, tmp, cq_list, com.list) { spin_unlock_irq(mlx4_tlock(dev)); if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { cqn = cq->com.res_id; state = cq->com.from_state; while (state != 0) { switch (state) { case RES_CQ_ALLOCATED: __mlx4_cq_free_icm(dev, cqn); spin_lock_irq(mlx4_tlock(dev)); radix_tree_delete(&tracker->res_tree[RES_CQ], cqn); list_del(&cq->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(cq); state = 0; break; case RES_CQ_HW: in_param = slave; err = mlx4_cmd(dev, in_param, cqn, 1, MLX4_CMD_HW2SW_CQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) mlx4_dbg(dev, "rem_slave_cqs: failed" " to move slave %d cq %d to" " SW ownership\n", slave, cqn); atomic_dec(&cq->mtt->ref_count); state = RES_CQ_ALLOCATED; break; default: state = 0; } } } spin_lock_irq(mlx4_tlock(dev)); } spin_unlock_irq(mlx4_tlock(dev)); } static void rem_slave_mrs(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct list_head *mpt_list = &tracker->slave_list[slave].res_list[RES_MPT]; struct res_mpt *mpt; struct res_mpt *tmp; int state; u64 in_param; LIST_HEAD(tlist); int mptn; int err; err = move_all_busy(dev, slave, RES_MPT); if (err) mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to 
" "busy for slave %d\n", slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { spin_unlock_irq(mlx4_tlock(dev)); if (mpt->com.owner == slave) { mptn = mpt->com.res_id; state = mpt->com.from_state; while (state != 0) { switch (state) { case RES_MPT_RESERVED: __mlx4_mr_release(dev, mpt->key); spin_lock_irq(mlx4_tlock(dev)); radix_tree_delete(&tracker->res_tree[RES_MPT], mptn); list_del(&mpt->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(mpt); state = 0; break; case RES_MPT_MAPPED: __mlx4_mr_free_icm(dev, mpt->key); state = RES_MPT_RESERVED; break; case RES_MPT_HW: in_param = slave; err = mlx4_cmd(dev, in_param, mptn, 0, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) mlx4_dbg(dev, "rem_slave_mrs: failed" " to move slave %d mpt %d to" " SW ownership\n", slave, mptn); if (mpt->mtt) atomic_dec(&mpt->mtt->ref_count); state = RES_MPT_MAPPED; break; default: state = 0; } } } spin_lock_irq(mlx4_tlock(dev)); } spin_unlock_irq(mlx4_tlock(dev)); } static void rem_slave_mtts(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct list_head *mtt_list = &tracker->slave_list[slave].res_list[RES_MTT]; struct res_mtt *mtt; struct res_mtt *tmp; int state; LIST_HEAD(tlist); int base; int err; err = move_all_busy(dev, slave, RES_MTT); if (err) mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " "busy for slave %d\n", slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { spin_unlock_irq(mlx4_tlock(dev)); if (mtt->com.owner == slave) { base = mtt->com.res_id; state = mtt->com.from_state; while (state != 0) { switch (state) { case RES_MTT_ALLOCATED: __mlx4_free_mtt_range(dev, base, mtt->order); spin_lock_irq(mlx4_tlock(dev)); radix_tree_delete(&tracker->res_tree[RES_MTT], base); list_del(&mtt->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(mtt); state = 0; break; default: state = 0; } } } spin_lock_irq(mlx4_tlock(dev)); } spin_unlock_irq(mlx4_tlock(dev)); } static void rem_slave_eqs(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct list_head *eq_list = &tracker->slave_list[slave].res_list[RES_EQ]; struct res_eq *eq; struct res_eq *tmp; int err; int state; LIST_HEAD(tlist); int eqn; struct mlx4_cmd_mailbox *mailbox; err = move_all_busy(dev, slave, RES_EQ); if (err) mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " "busy for slave %d\n", slave); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(eq, tmp, eq_list, com.list) { spin_unlock_irq(mlx4_tlock(dev)); if (eq->com.owner == slave) { eqn = eq->com.res_id; state = eq->com.from_state; while (state != 0) { switch (state) { case RES_EQ_RESERVED: spin_lock_irq(mlx4_tlock(dev)); radix_tree_delete(&tracker->res_tree[RES_EQ], eqn); list_del(&eq->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(eq); state = 0; break; case RES_EQ_HW: mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) { cond_resched(); continue; } err = mlx4_cmd_box(dev, slave, 0, eqn & 0xff, 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); mlx4_dbg(dev, "rem_slave_eqs: failed" " to move slave %d eqs %d to" " SW ownership\n", slave, eqn); mlx4_free_cmd_mailbox(dev, mailbox); if (!err) { atomic_dec(&eq->mtt->ref_count); state = RES_EQ_RESERVED; } break; default: state = 0; } } } spin_lock_irq(mlx4_tlock(dev)); } 
spin_unlock_irq(mlx4_tlock(dev)); } void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); /*VLAN*/ rem_slave_macs(dev, slave); rem_slave_qps(dev, slave); rem_slave_srqs(dev, slave); rem_slave_cqs(dev, slave); rem_slave_mrs(dev, slave); rem_slave_eqs(dev, slave); rem_slave_mtts(dev, slave); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); }
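The ALLOC_RES/FREE_RES wrappers above pass a whole range through a single 64-bit in_param: the low half (read with get_param_l()) carries the base index, and the high half (read with get_param_h()) carries the element count or MTT order. A minimal standalone userspace sketch of that packing convention follows; the helper names and example values are illustrative only and are not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Low 32 bits of the packed parameter: base index of the range. */
static uint32_t param_low(uint64_t param)
{
	return (uint32_t)(param & 0xffffffffULL);
}

/* High 32 bits of the packed parameter: element count (or MTT order). */
static uint32_t param_high(uint64_t param)
{
	return (uint32_t)(param >> 32);
}

int main(void)
{
	/* Example only: a range starting at index 0x40 with 8 elements. */
	uint64_t in_param = ((uint64_t)8 << 32) | 0x40;

	printf("base=0x%x count=%u\n", param_low(in_param), param_high(in_param));
	return 0;
}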
gpl-2.0
playfulgod/android_kernel_lge_kk_zee
drivers/mtd/maps/uclinux.c
4855
3013
/****************************************************************************/
/*
 *	uclinux.c -- generic memory mapped MTD driver for uclinux
 *
 *	(C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
 */
/****************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>

/****************************************************************************/

extern char _ebss;

struct map_info uclinux_ram_map = {
	.name = "RAM",
	.phys = (unsigned long)&_ebss,
	.size = 0,
};

static struct mtd_info *uclinux_ram_mtdinfo;

/****************************************************************************/

static struct mtd_partition uclinux_romfs[] = {
	{ .name = "ROMfs" }
};

#define NUM_PARTITIONS	ARRAY_SIZE(uclinux_romfs)

/****************************************************************************/

static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
	size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;

	*virt = map->virt + from;
	if (phys)
		*phys = map->phys + from;
	*retlen = len;
	return(0);
}

/****************************************************************************/

static int __init uclinux_mtd_init(void)
{
	struct mtd_info *mtd;
	struct map_info *mapp;

	mapp = &uclinux_ram_map;
	if (!mapp->size)
		mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
	mapp->bankwidth = 4;

	printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
		(int) mapp->phys, (int) mapp->size);

	mapp->virt = ioremap_nocache(mapp->phys, mapp->size);
	if (mapp->virt == 0) {
		printk("uclinux[mtd]: ioremap_nocache() failed\n");
		return(-EIO);
	}

	simple_map_init(mapp);

	mtd = do_map_probe("map_ram", mapp);
	if (!mtd) {
		printk("uclinux[mtd]: failed to find a mapping?\n");
		iounmap(mapp->virt);
		return(-ENXIO);
	}

	mtd->owner = THIS_MODULE;
	mtd->_point = uclinux_point;
	mtd->priv = mapp;

	uclinux_ram_mtdinfo = mtd;
	mtd_device_register(mtd, uclinux_romfs, NUM_PARTITIONS);

	return(0);
}

/****************************************************************************/

static void __exit uclinux_mtd_cleanup(void)
{
	if (uclinux_ram_mtdinfo) {
		mtd_device_unregister(uclinux_ram_mtdinfo);
		map_destroy(uclinux_ram_mtdinfo);
		uclinux_ram_mtdinfo = NULL;
	}
	if (uclinux_ram_map.virt) {
		iounmap((void *) uclinux_ram_map.virt);
		uclinux_ram_map.virt = 0;
	}
}

/****************************************************************************/

module_init(uclinux_mtd_init);
module_exit(uclinux_mtd_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
MODULE_DESCRIPTION("Generic RAM based MTD for uClinux");

/****************************************************************************/
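uclinux_mtd_init above sizes the RAM map from the filesystem image itself: when uclinux_ram_map.size is left at zero, it reads the 32-bit big-endian word at offset 8 of the image (in a romfs image this is the full-size field of the header) and page-aligns it. A small standalone userspace sketch of that header read follows; the header bytes are a made-up example, and only the field offsets follow the romfs layout.

#include <arpa/inet.h>	/* ntohl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fabricated example of the first 16 bytes of a romfs image:
 * bytes 0-7 hold the "-rom1fs-" magic, bytes 8-11 the full image size
 * (big-endian), bytes 12-15 the start of the checksum field. */
static const uint8_t romfs_header[16] = {
	'-', 'r', 'o', 'm', '1', 'f', 's', '-',
	0x00, 0x00, 0x30, 0x00,	/* size = 0x3000 bytes */
	0x00, 0x00, 0x00, 0x00,
};

int main(void)
{
	uint32_t be_size;

	/* Same idea as the probe above: the image size lives at offset 8. */
	memcpy(&be_size, romfs_header + 8, sizeof(be_size));
	printf("romfs image size: %u bytes\n", (unsigned)ntohl(be_size));
	return 0;
}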
gpl-2.0
Fechinator/FechdaKernel
drivers/scsi/bfa/bfad_bsg.c
4855
88372
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/uaccess.h> #include "bfad_drv.h" #include "bfad_im.h" #include "bfad_bsg.h" BFA_TRC_FILE(LDRV, BSG); int bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; int rc = 0; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); /* If IOC is not in disabled state - return */ if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_IOC_FAILURE; return rc; } init_completion(&bfad->enable_comp); bfa_iocfc_enable(&bfad->bfa); iocmd->status = BFA_STATUS_OK; spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->enable_comp); return rc; } int bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; int rc = 0; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (bfad->disable_active) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); return -EBUSY; } bfad->disable_active = BFA_TRUE; init_completion(&bfad->disable_comp); bfa_iocfc_disable(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); wait_for_completion(&bfad->disable_comp); bfad->disable_active = BFA_FALSE; iocmd->status = BFA_STATUS_OK; return rc; } static int bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd) { int i; struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd; struct bfad_im_port_s *im_port; struct bfa_port_attr_s pattr; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcport_get_attr(&bfad->bfa, &pattr); iocmd->nwwn = pattr.nwwn; iocmd->pwwn = pattr.pwwn; iocmd->ioc_type = bfa_get_type(&bfad->bfa); iocmd->mac = bfa_get_mac(&bfad->bfa); iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa); bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum); iocmd->factorynwwn = pattr.factorynwwn; iocmd->factorypwwn = pattr.factorypwwn; iocmd->bfad_num = bfad->inst_no; im_port = bfad->pport.im_port; iocmd->host = im_port->shost->host_no; spin_unlock_irqrestore(&bfad->bfad_lock, flags); strcpy(iocmd->name, bfad->adapter_name); strcpy(iocmd->port_name, bfad->port_name); strcpy(iocmd->hwpath, bfad->pci_name); /* set adapter hw path */ strcpy(iocmd->adapter_hwpath, bfad->pci_name); i = strlen(iocmd->adapter_hwpath) - 1; while (iocmd->adapter_hwpath[i] != '.') i--; iocmd->adapter_hwpath[i] = '\0'; iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* fill in driver attr info */ strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME); strncpy(iocmd->ioc_attr.driver_attr.driver_ver, BFAD_DRIVER_VERSION, BFA_VERSION_LEN); strcpy(iocmd->ioc_attr.driver_attr.fw_ver, 
iocmd->ioc_attr.adapter_attr.fw_ver); strcpy(iocmd->ioc_attr.driver_attr.bios_ver, iocmd->ioc_attr.adapter_attr.optrom_ver); /* copy chip rev info first otherwise it will be overwritten */ memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev, sizeof(bfad->pci_attr.chip_rev)); memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr, sizeof(struct bfa_ioc_pci_attr_s)); iocmd->status = BFA_STATUS_OK; return 0; } int bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd; bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats); iocmd->status = BFA_STATUS_OK; return 0; } int bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_ioc_fwstats_s *iocmd = (struct bfa_bsg_ioc_fwstats_s *)cmd; void *iocmd_bufptr; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_ioc_fwstats_s), sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; goto out; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } out: bfa_trc(bfad, 0x6666); return 0; } int bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; if (v_cmd == IOCMD_IOC_RESET_STATS) { bfa_ioc_clear_stats(&bfad->bfa); iocmd->status = BFA_STATUS_OK; } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) { spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } return 0; } int bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd; if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME) strcpy(bfad->adapter_name, iocmd->name); else if (v_cmd == IOCMD_IOC_SET_PORT_NAME) strcpy(bfad->port_name, iocmd->name); iocmd->status = BFA_STATUS_OK; return 0; } int bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd; iocmd->status = BFA_STATUS_OK; bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr); return 0; } int bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_port_enable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); return 0; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; return 0; } int bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); 
iocmd->status = bfa_port_disable(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); return 0; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; return 0; } static int bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd; struct bfa_lport_attr_s port_attr; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr); bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE) iocmd->attr.pid = port_attr.pid; else iocmd->attr.pid = 0; iocmd->attr.port_type = port_attr.port_type; iocmd->attr.loopback = port_attr.loopback; iocmd->attr.authfail = port_attr.authfail; strncpy(iocmd->attr.port_symname.symname, port_attr.port_cfg.sym_name.symname, sizeof(port_attr.port_cfg.sym_name.symname)); iocmd->status = BFA_STATUS_OK; return 0; } int bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd; struct bfad_hal_comp fcomp; void *iocmd_bufptr; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_port_stats_s), sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port, iocmd_bufptr, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); return 0; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; return 0; } int bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd) { struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (v_cmd == IOCMD_PORT_CFG_TOPO) cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param); else if (v_cmd == IOCMD_PORT_CFG_SPEED) cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param); else if (v_cmd == IOCMD_PORT_CFG_ALPA) cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param); else if (v_cmd == IOCMD_PORT_CLR_ALPA) cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_port_cfg_maxfrsize_s *iocmd = (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int 
bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) { if (v_cmd == IOCMD_PORT_BBSC_ENABLE) fcport->cfg.bb_scn_state = BFA_TRUE; else if (v_cmd == IOCMD_PORT_BBSC_DISABLE) fcport->cfg.bb_scn_state = BFA_FALSE; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } static int bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_lport_s *fcs_port; struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_lport_s *fcs_port; struct bfa_bsg_lport_stats_s *iocmd = (struct bfa_bsg_lport_stats_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_lport_s *fcs_port; struct bfa_bsg_reset_stats_s *iocmd = (struct bfa_bsg_reset_stats_s *)cmd; struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); struct list_head *qe, *qen; struct bfa_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->vpwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcs_lport_clear_stats(fcs_port); /* clear IO stats from all active itnims */ list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag) continue; bfa_itnim_clear_stats(itnim); } spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_lport_s *fcs_port; struct bfa_bsg_lport_iostats_s *iocmd = (struct bfa_bsg_lport_iostats_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats, fcs_port->lp_tag); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_lport_get_rports_s *iocmd = (struct bfa_bsg_lport_get_rports_s *)cmd; struct bfa_fcs_lport_s *fcs_port; unsigned long flags; void *iocmd_bufptr; if (iocmd->nrports == 0) 
return -EINVAL; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_lport_get_rports_s), sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_lport_get_rports_s); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, 0); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr, &iocmd->nrports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); if (fcs_rport == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_scsi_addr_s *iocmd = (struct bfa_bsg_rport_scsi_addr_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_itnim_s *fcs_itnim; struct bfad_itnim_s *drv_itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); if (fcs_itnim == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } drv_itnim = fcs_itnim->itnim_drv; if (drv_itnim && drv_itnim->im_port) iocmd->host = drv_itnim->im_port->shost->host_no; else { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } iocmd->target = drv_itnim->scsi_tgt_id; spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->bus = 0; iocmd->lun = 0; iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_stats_s *iocmd = (struct bfa_bsg_rport_stats_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); if (fcs_rport == NULL) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats, sizeof(struct bfa_rport_stats_s)); memcpy((void *)&iocmd->stats.hal_stats, (void 
*)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats), sizeof(struct bfa_rport_hal_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_reset_stats_s *iocmd = (struct bfa_bsg_rport_reset_stats_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; struct bfa_rport_s *rport; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); if (fcs_rport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s)); rport = bfa_fcs_rport_get_halrport(fcs_rport); memset(&rport->stats, 0, sizeof(rport->stats)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_rport_set_speed_s *iocmd = (struct bfa_bsg_rport_set_speed_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->pwwn); if (fcs_port == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_LWWN; goto out; } fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn); if (fcs_rport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_RWWN; goto out; } fcs_rport->rpf.assigned_speed = iocmd->speed; /* Set this speed in f/w only if the RPSC speed is not available */ if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN) bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_vport_s *fcs_vport; struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, iocmd->vf_id, iocmd->vpwwn); if (fcs_vport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VWWN; goto out; } bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_fcs_vport_s *fcs_vport; struct bfa_bsg_vport_stats_s *iocmd = (struct bfa_bsg_vport_stats_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, iocmd->vf_id, iocmd->vpwwn); if (fcs_vport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VWWN; goto out; } memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats, sizeof(struct bfa_vport_stats_s)); memcpy((void *)&iocmd->vport_stats.port_stats, (void *)&fcs_vport->lport.stats, sizeof(struct bfa_lport_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void 
*cmd) { struct bfa_fcs_vport_s *fcs_vport; struct bfa_bsg_reset_stats_s *iocmd = (struct bfa_bsg_reset_stats_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, iocmd->vf_id, iocmd->vpwwn); if (fcs_vport == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VWWN; goto out; } memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } static int bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_fabric_get_lports_s *iocmd = (struct bfa_bsg_fabric_get_lports_s *)cmd; bfa_fcs_vf_t *fcs_vf; uint32_t nports = iocmd->nports; unsigned long flags; void *iocmd_bufptr; if (nports == 0) { iocmd->status = BFA_STATUS_EINVAL; goto out; } if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_fabric_get_lports_s), sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; goto out; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_fabric_get_lports_s); spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); if (fcs_vf == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VFID; goto out; } bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->nports = nports; iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (cmd == IOCMD_RATELIM_ENABLE) fcport->cfg.ratelimit = BFA_TRUE; else if (cmd == IOCMD_RATELIM_DISABLE) fcport->cfg.ratelimit = BFA_FALSE; if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } int bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd) { struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); /* Auto and speeds greater than the supported speed, are invalid */ if ((iocmd->speed == BFA_PORT_SPEED_AUTO) || (iocmd->speed > fcport->speed_sup)) { iocmd->status = BFA_STATUS_UNSUPP_SPEED; spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } fcport->cfg.trl_def_speed = iocmd->speed; iocmd->status = BFA_STATUS_OK; spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; return 0; } int bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_modstats_s *iocmd = (struct bfa_bsg_fcpim_modstats_s *)cmd; struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa); struct list_head *qe, *qen; struct bfa_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); /* 
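Under bfad_lock, walk each itnim queued on the fcpim module and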
accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
			(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			memcpy((void *)&iocmd->iostats, (void *)
			       &(bfa_fcs_itnim_get_halitn(itnim)->stats),
			       sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
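	/* Return the FCS ITN statistics kept for remote port rpwwn as seen
	 * through local port lpwwn. */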
struct bfa_bsg_itnim_itnstats_s *iocmd = (struct bfa_bsg_itnim_itnstats_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->lpwwn); if (!fcs_port) { iocmd->status = BFA_STATUS_UNKNOWN_LWWN; bfa_trc(bfad, 0); } else { itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); if (itnim == NULL) iocmd->status = BFA_STATUS_UNKNOWN_RWWN; else { iocmd->status = BFA_STATUS_OK; bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn, &iocmd->itnstats); } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_enable(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_disable(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk, &iocmd->pcifn_cfg, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk, &iocmd->pcifn_id, iocmd->port, iocmd->pcifn_class, iocmd->bandwidth, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk, iocmd->pcifn_id, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk, iocmd->pcifn_id, iocmd->bandwidth, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; bfa_trc(bfad, iocmd->status); out: return 0; } int bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd) { 
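	/* Reconfigure the adapter mode (and max PF/VF counts) through the
	 * ablk module, then wait for the firmware to acknowledge it. */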
struct bfa_bsg_adapter_cfg_mode_s *iocmd = (struct bfa_bsg_adapter_cfg_mode_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk, iocmd->cfg.mode, iocmd->cfg.max_pf, iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_port_cfg_mode_s *iocmd = (struct bfa_bsg_port_cfg_mode_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags = 0; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk, iocmd->instance, iocmd->cfg.mode, iocmd->cfg.max_pf, iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); if (cmd == IOCMD_FLASH_ENABLE_OPTROM) iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk, bfad_hcb_comp, &fcomp); else iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); iocmd->status = BFA_STATUS_OK; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_cee_attr_s *iocmd = (struct bfa_bsg_cee_attr_s *)cmd; void *iocmd_bufptr; struct bfad_hal_comp cee_comp; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_cee_attr_s), sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s); cee_comp.status = 0; init_completion(&cee_comp.comp); mutex_lock(&bfad_mutex); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr, bfad_hcb_comp, &cee_comp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { mutex_unlock(&bfad_mutex); bfa_trc(bfad, 0x5555); goto out; } wait_for_completion(&cee_comp.comp); mutex_unlock(&bfad_mutex); out: return 0; } int bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_cee_stats_s *iocmd = (struct bfa_bsg_cee_stats_s *)cmd; void *iocmd_bufptr; struct bfad_hal_comp cee_comp; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_cee_stats_s), 
sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s); cee_comp.status = 0; init_completion(&cee_comp.comp); mutex_lock(&bfad_mutex); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr, bfad_hcb_comp, &cee_comp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { mutex_unlock(&bfad_mutex); bfa_trc(bfad, 0x5555); goto out; } wait_for_completion(&cee_comp.comp); mutex_unlock(&bfad_mutex); out: return 0; } int bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) bfa_trc(bfad, 0x5555); return 0; } int bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_SFP_NOT_READY) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_SFP_NOT_READY) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_flash_attr_s *iocmd = (struct bfa_bsg_flash_attr_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type, iocmd->instance, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; void *iocmd_bufptr; struct bfad_hal_comp fcomp; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_flash_s), iocmd->bufsz) != BFA_STATUS_OK) { iocmd->status = 
BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), iocmd->type, iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd; struct bfad_hal_comp fcomp; void *iocmd_bufptr; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_flash_s), iocmd->bufsz) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type, iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_get_temp_s *iocmd = (struct bfa_bsg_diag_get_temp_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa), &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_memtest_s *iocmd = (struct bfa_bsg_diag_memtest_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa), &iocmd->memtest, iocmd->pat, &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_loopback_s *iocmd = (struct bfa_bsg_diag_loopback_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode, iocmd->speed, iocmd->lpcnt, iocmd->pat, &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_fwping_s *iocmd = (struct bfa_bsg_diag_fwping_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt, iocmd->pattern, &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); 
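	/* fwping has been issued to the IOC firmware; if it was accepted,
	 * wait below for the completion callback before reporting status. */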
bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; bfa_trc(bfad, 0x77771); wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force, iocmd->queue, &iocmd->result, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_sfp_show_s *iocmd = (struct bfa_bsg_sfp_show_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; bfa_trc(bfad, iocmd->status); out: return 0; } int bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa), &iocmd->ledtest); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_beacon_s *iocmd = (struct bfa_bsg_diag_beacon_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa), iocmd->beacon, iocmd->link_e2e_beacon, iocmd->second); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_diag_lb_stat_s *iocmd = (struct bfa_bsg_diag_lb_stat_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfa_trc(bfad, iocmd->status); return 0; } int bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_phy_attr_s *iocmd = (struct bfa_bsg_phy_attr_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance, &iocmd->attr, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_phy_stats_s *iocmd = (struct bfa_bsg_phy_stats_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance, &iocmd->stats, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct 
bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd; struct bfad_hal_comp fcomp; void *iocmd_bufptr; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_phy_s), iocmd->bufsz) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa), iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; if (iocmd->status != BFA_STATUS_OK) goto out; out: return 0; } int bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_vhba_attr_s *iocmd = (struct bfa_bsg_vhba_attr_s *)cmd; struct bfa_vhba_attr_s *attr = &iocmd->attr; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); attr->pwwn = bfad->bfa.ioc.attr->pwwn; attr->nwwn = bfad->bfa.ioc.attr->nwwn; attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled; attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa); attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa); iocmd->status = BFA_STATUS_OK; spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd; void *iocmd_bufptr; struct bfad_hal_comp fcomp; unsigned long flags; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_phy_s), iocmd->bufsz) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s); init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa), iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; void *iocmd_bufptr; if (iocmd->bufsz < sizeof(struct bfa_plog_s)) { bfa_trc(bfad, sizeof(struct bfa_plog_s)); iocmd->status = BFA_STATUS_EINVAL; goto out; } iocmd->status = BFA_STATUS_OK; iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s)); out: return 0; } #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */ int bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd, unsigned int payload_len) { struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd; void *iocmd_bufptr; unsigned long flags; u32 offset; if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s), BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) { iocmd->status = BFA_STATUS_VERSION_FAIL; return 0; } if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ || !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) || !IS_ALIGNED(iocmd->offset, sizeof(u32))) { bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ); iocmd->status = BFA_STATUS_EINVAL; goto out; } iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s); spin_lock_irqsave(&bfad->bfad_lock, flags); offset = iocmd->offset; iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr, &offset, &iocmd->bufsz); iocmd->offset = offset; 
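	/* Hand the updated offset back to the caller so the next 16K chunk
	 * of the firmware core dump continues where this read stopped. */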
spin_unlock_irqrestore(&bfad->bfad_lock, flags); out: return 0; } int bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; unsigned long flags; if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) { spin_lock_irqsave(&bfad->bfad_lock, flags); bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR) bfad->plog_buf.head = bfad->plog_buf.tail = 0; else if (v_cmd == IOCMD_DEBUG_START_DTRC) bfa_trc_init(bfad->trcmod); else if (v_cmd == IOCMD_DEBUG_STOP_DTRC) bfa_trc_stop(bfad->trcmod); iocmd->status = BFA_STATUS_OK; return 0; } int bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd; if (iocmd->ctl == BFA_TRUE) bfad->plog_buf.plog_enabled = 1; else bfad->plog_buf.plog_enabled = 0; iocmd->status = BFA_STATUS_OK; return 0; } int bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_fcpim_profile_s *iocmd = (struct bfa_bsg_fcpim_profile_s *)cmd; struct timeval tv; unsigned long flags; do_gettimeofday(&tv); spin_lock_irqsave(&bfad->bfad_lock, flags); if (v_cmd == IOCMD_FCPIM_PROFILE_ON) iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec); else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF) iocmd->status = bfa_fcpim_profile_off(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_itnim_ioprofile_s *iocmd = (struct bfa_bsg_itnim_ioprofile_s *)cmd; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_itnim_s *itnim; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, iocmd->vf_id, iocmd->lpwwn); if (!fcs_port) iocmd->status = BFA_STATUS_UNKNOWN_LWWN; else { itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn); if (itnim == NULL) iocmd->status = BFA_STATUS_UNKNOWN_RWWN; else iocmd->status = bfa_itnim_get_ioprofile( bfa_fcs_itnim_get_halitn(itnim), &iocmd->ioprofile); } spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcport_stats_s *iocmd = (struct bfa_bsg_fcport_stats_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, &iocmd->stats); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_boot_s *iocmd = (struct 
bfa_bsg_boot_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn), &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd; struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp; struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; pbcfg->speed = cfgrsp->pbc_cfg.port_speed; memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); iocmd->status = BFA_STATUS_OK; spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa), BFA_FLASH_PART_PXECFG, bfad->bfa.ioc.port_id, &iocmd->cfg, sizeof(struct bfa_ethboot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd; struct bfad_hal_comp fcomp; unsigned long flags; init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), BFA_FLASH_PART_PXECFG, bfad->bfa.ioc.port_id, &iocmd->cfg, sizeof(struct bfa_ethboot_cfg_s), 0, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) goto out; wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); struct bfa_fcport_trunk_s *trunk = &fcport->trunk; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (v_cmd == IOCMD_TRUNK_ENABLE) { trunk->attr.state = BFA_TRUNK_OFFLINE; bfa_fcport_disable(&bfad->bfa); fcport->cfg.trunked = BFA_TRUE; } else if (v_cmd == IOCMD_TRUNK_DISABLE) { trunk->attr.state = BFA_TRUNK_DISABLED; bfa_fcport_disable(&bfad->bfa); fcport->cfg.trunked = BFA_FALSE; } if (!bfa_fcport_is_disabled(&bfad->bfa)) 
		bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
		sizeof(struct bfa_trunk_attr_s));
	iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if (v_cmd == IOCMD_QOS_ENABLE)
			fcport->cfg.qos_enabled = BFA_TRUE;
		else if (v_cmd == IOCMD_QOS_DISABLE)
			fcport->cfg.qos_enabled = BFA_FALSE;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.state = fcport->qos_attr.state;
	iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_qos_vc_attr_s *iocmd =
			(struct bfa_bsg_qos_vc_attr_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
	unsigned long flags;
	u32 i = 0;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
	iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
	iocmd->attr.elp_opmode_flags =
			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);

	/* Individual VC info */
	while (i < iocmd->attr.total_vc_count) {
		iocmd->attr.vc_info[i].vc_credit =
			bfa_vc_attr->vc_info[i].vc_credit;
		iocmd->attr.vc_info[i].borrow_credit =
			bfa_vc_attr->vc_info[i].borrow_credit;
		iocmd->attr.vc_info[i].priority =
			bfa_vc_attr->vc_info[i].priority;
		i++;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcport_stats_s *iocmd =
			(struct bfa_bsg_fcport_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
			   &fcomp, &iocmd->stats);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;
	struct bfa_cb_pending_q_s cb_qe;

	init_completion(&fcomp.comp);
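	/* The pending-queue entry lets bfad_hcb_comp complete fcomp once the
	 * firmware has finished clearing the port statistics. */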
bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (iocmd->status != BFA_STATUS_OK) { bfa_trc(bfad, iocmd->status); goto out; } wait_for_completion(&fcomp.comp); iocmd->status = fcomp.status; out: return 0; } int bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_vf_stats_s *iocmd = (struct bfa_bsg_vf_stats_s *)cmd; struct bfa_fcs_fabric_s *fcs_vf; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); if (fcs_vf == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VFID; goto out; } memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats, sizeof(struct bfa_vf_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } int bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_vf_reset_stats_s *iocmd = (struct bfa_bsg_vf_reset_stats_s *)cmd; struct bfa_fcs_fabric_s *fcs_vf; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id); if (fcs_vf == NULL) { spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_UNKNOWN_VFID; goto out; } memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s)); spin_unlock_irqrestore(&bfad->bfad_lock, flags); iocmd->status = BFA_STATUS_OK; out: return 0; } /* Function to reset the LUN SCAN mode */ static void bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg) { struct bfad_im_port_s *pport_im = bfad->pport.im_port; struct bfad_vport_s *vport = NULL; /* Set the scsi device LUN SCAN flags for base port */ bfad_reset_sdev_bflags(pport_im, lunmask_cfg); /* Set the scsi device LUN SCAN flags for the vports */ list_for_each_entry(vport, &bfad->vport_list, list_entry) bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg); } int bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd) { struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) { iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE); /* Set the LUN Scanning mode to be Sequential scan */ if (iocmd->status == BFA_STATUS_OK) bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE); } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) { iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE); /* Set the LUN Scanning mode to default REPORT_LUNS scan */ if (iocmd->status == BFA_STATUS_OK) bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE); } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR) iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd) { struct bfa_bsg_fcpim_lunmask_query_s *iocmd = (struct bfa_bsg_fcpim_lunmask_query_s *)cmd; struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } int bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd) { struct bfa_bsg_fcpim_lunmask_s *iocmd = (struct 
bfa_bsg_fcpim_lunmask_s *)cmd; unsigned long flags; spin_lock_irqsave(&bfad->bfad_lock, flags); if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD) iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id, &iocmd->pwwn, iocmd->rpwwn, iocmd->lun); else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE) iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa, iocmd->vf_id, &iocmd->pwwn, iocmd->rpwwn, iocmd->lun); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return 0; } static int bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, unsigned int payload_len) { int rc = -EINVAL; switch (cmd) { case IOCMD_IOC_ENABLE: rc = bfad_iocmd_ioc_enable(bfad, iocmd); break; case IOCMD_IOC_DISABLE: rc = bfad_iocmd_ioc_disable(bfad, iocmd); break; case IOCMD_IOC_GET_INFO: rc = bfad_iocmd_ioc_get_info(bfad, iocmd); break; case IOCMD_IOC_GET_ATTR: rc = bfad_iocmd_ioc_get_attr(bfad, iocmd); break; case IOCMD_IOC_GET_STATS: rc = bfad_iocmd_ioc_get_stats(bfad, iocmd); break; case IOCMD_IOC_GET_FWSTATS: rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len); break; case IOCMD_IOC_RESET_STATS: case IOCMD_IOC_RESET_FWSTATS: rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd); break; case IOCMD_IOC_SET_ADAPTER_NAME: case IOCMD_IOC_SET_PORT_NAME: rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd); break; case IOCMD_IOCFC_GET_ATTR: rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd); break; case IOCMD_IOCFC_SET_INTR: rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd); break; case IOCMD_PORT_ENABLE: rc = bfad_iocmd_port_enable(bfad, iocmd); break; case IOCMD_PORT_DISABLE: rc = bfad_iocmd_port_disable(bfad, iocmd); break; case IOCMD_PORT_GET_ATTR: rc = bfad_iocmd_port_get_attr(bfad, iocmd); break; case IOCMD_PORT_GET_STATS: rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len); break; case IOCMD_PORT_RESET_STATS: rc = bfad_iocmd_port_reset_stats(bfad, iocmd); break; case IOCMD_PORT_CFG_TOPO: case IOCMD_PORT_CFG_SPEED: case IOCMD_PORT_CFG_ALPA: case IOCMD_PORT_CLR_ALPA: rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd); break; case IOCMD_PORT_CFG_MAXFRSZ: rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd); break; case IOCMD_PORT_BBSC_ENABLE: case IOCMD_PORT_BBSC_DISABLE: rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd); break; case IOCMD_LPORT_GET_ATTR: rc = bfad_iocmd_lport_get_attr(bfad, iocmd); break; case IOCMD_LPORT_GET_STATS: rc = bfad_iocmd_lport_get_stats(bfad, iocmd); break; case IOCMD_LPORT_RESET_STATS: rc = bfad_iocmd_lport_reset_stats(bfad, iocmd); break; case IOCMD_LPORT_GET_IOSTATS: rc = bfad_iocmd_lport_get_iostats(bfad, iocmd); break; case IOCMD_LPORT_GET_RPORTS: rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len); break; case IOCMD_RPORT_GET_ATTR: rc = bfad_iocmd_rport_get_attr(bfad, iocmd); break; case IOCMD_RPORT_GET_ADDR: rc = bfad_iocmd_rport_get_addr(bfad, iocmd); break; case IOCMD_RPORT_GET_STATS: rc = bfad_iocmd_rport_get_stats(bfad, iocmd); break; case IOCMD_RPORT_RESET_STATS: rc = bfad_iocmd_rport_clr_stats(bfad, iocmd); break; case IOCMD_RPORT_SET_SPEED: rc = bfad_iocmd_rport_set_speed(bfad, iocmd); break; case IOCMD_VPORT_GET_ATTR: rc = bfad_iocmd_vport_get_attr(bfad, iocmd); break; case IOCMD_VPORT_GET_STATS: rc = bfad_iocmd_vport_get_stats(bfad, iocmd); break; case IOCMD_VPORT_RESET_STATS: rc = bfad_iocmd_vport_clr_stats(bfad, iocmd); break; case IOCMD_FABRIC_GET_LPORTS: rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len); break; case IOCMD_RATELIM_ENABLE: case IOCMD_RATELIM_DISABLE: rc = bfad_iocmd_ratelim(bfad, cmd, iocmd); break; case IOCMD_RATELIM_DEF_SPEED: rc = bfad_iocmd_ratelim_speed(bfad, 
cmd, iocmd); break; case IOCMD_FCPIM_FAILOVER: rc = bfad_iocmd_cfg_fcpim(bfad, iocmd); break; case IOCMD_FCPIM_MODSTATS: rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd); break; case IOCMD_FCPIM_MODSTATSCLR: rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd); break; case IOCMD_FCPIM_DEL_ITN_STATS: rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd); break; case IOCMD_ITNIM_GET_ATTR: rc = bfad_iocmd_itnim_get_attr(bfad, iocmd); break; case IOCMD_ITNIM_GET_IOSTATS: rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd); break; case IOCMD_ITNIM_RESET_STATS: rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd); break; case IOCMD_ITNIM_GET_ITNSTATS: rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd); break; case IOCMD_FCPORT_ENABLE: rc = bfad_iocmd_fcport_enable(bfad, iocmd); break; case IOCMD_FCPORT_DISABLE: rc = bfad_iocmd_fcport_disable(bfad, iocmd); break; case IOCMD_IOC_PCIFN_CFG: rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd); break; case IOCMD_PCIFN_CREATE: rc = bfad_iocmd_pcifn_create(bfad, iocmd); break; case IOCMD_PCIFN_DELETE: rc = bfad_iocmd_pcifn_delete(bfad, iocmd); break; case IOCMD_PCIFN_BW: rc = bfad_iocmd_pcifn_bw(bfad, iocmd); break; case IOCMD_ADAPTER_CFG_MODE: rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd); break; case IOCMD_PORT_CFG_MODE: rc = bfad_iocmd_port_cfg_mode(bfad, iocmd); break; case IOCMD_FLASH_ENABLE_OPTROM: case IOCMD_FLASH_DISABLE_OPTROM: rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd); break; case IOCMD_FAA_QUERY: rc = bfad_iocmd_faa_query(bfad, iocmd); break; case IOCMD_CEE_GET_ATTR: rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len); break; case IOCMD_CEE_GET_STATS: rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len); break; case IOCMD_CEE_RESET_STATS: rc = bfad_iocmd_cee_reset_stats(bfad, iocmd); break; case IOCMD_SFP_MEDIA: rc = bfad_iocmd_sfp_media(bfad, iocmd); break; case IOCMD_SFP_SPEED: rc = bfad_iocmd_sfp_speed(bfad, iocmd); break; case IOCMD_FLASH_GET_ATTR: rc = bfad_iocmd_flash_get_attr(bfad, iocmd); break; case IOCMD_FLASH_ERASE_PART: rc = bfad_iocmd_flash_erase_part(bfad, iocmd); break; case IOCMD_FLASH_UPDATE_PART: rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len); break; case IOCMD_FLASH_READ_PART: rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len); break; case IOCMD_DIAG_TEMP: rc = bfad_iocmd_diag_temp(bfad, iocmd); break; case IOCMD_DIAG_MEMTEST: rc = bfad_iocmd_diag_memtest(bfad, iocmd); break; case IOCMD_DIAG_LOOPBACK: rc = bfad_iocmd_diag_loopback(bfad, iocmd); break; case IOCMD_DIAG_FWPING: rc = bfad_iocmd_diag_fwping(bfad, iocmd); break; case IOCMD_DIAG_QUEUETEST: rc = bfad_iocmd_diag_queuetest(bfad, iocmd); break; case IOCMD_DIAG_SFP: rc = bfad_iocmd_diag_sfp(bfad, iocmd); break; case IOCMD_DIAG_LED: rc = bfad_iocmd_diag_led(bfad, iocmd); break; case IOCMD_DIAG_BEACON_LPORT: rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd); break; case IOCMD_DIAG_LB_STAT: rc = bfad_iocmd_diag_lb_stat(bfad, iocmd); break; case IOCMD_PHY_GET_ATTR: rc = bfad_iocmd_phy_get_attr(bfad, iocmd); break; case IOCMD_PHY_GET_STATS: rc = bfad_iocmd_phy_get_stats(bfad, iocmd); break; case IOCMD_PHY_UPDATE_FW: rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len); break; case IOCMD_PHY_READ_FW: rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len); break; case IOCMD_VHBA_QUERY: rc = bfad_iocmd_vhba_query(bfad, iocmd); break; case IOCMD_DEBUG_PORTLOG: rc = bfad_iocmd_porglog_get(bfad, iocmd); break; case IOCMD_DEBUG_FW_CORE: rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len); break; case IOCMD_DEBUG_FW_STATE_CLR: case IOCMD_DEBUG_PORTLOG_CLR: case 
IOCMD_DEBUG_START_DTRC: case IOCMD_DEBUG_STOP_DTRC: rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd); break; case IOCMD_DEBUG_PORTLOG_CTL: rc = bfad_iocmd_porglog_ctl(bfad, iocmd); break; case IOCMD_FCPIM_PROFILE_ON: case IOCMD_FCPIM_PROFILE_OFF: rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd); break; case IOCMD_ITNIM_GET_IOPROFILE: rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd); break; case IOCMD_FCPORT_GET_STATS: rc = bfad_iocmd_fcport_get_stats(bfad, iocmd); break; case IOCMD_FCPORT_RESET_STATS: rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd); break; case IOCMD_BOOT_CFG: rc = bfad_iocmd_boot_cfg(bfad, iocmd); break; case IOCMD_BOOT_QUERY: rc = bfad_iocmd_boot_query(bfad, iocmd); break; case IOCMD_PREBOOT_QUERY: rc = bfad_iocmd_preboot_query(bfad, iocmd); break; case IOCMD_ETHBOOT_CFG: rc = bfad_iocmd_ethboot_cfg(bfad, iocmd); break; case IOCMD_ETHBOOT_QUERY: rc = bfad_iocmd_ethboot_query(bfad, iocmd); break; case IOCMD_TRUNK_ENABLE: case IOCMD_TRUNK_DISABLE: rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd); break; case IOCMD_TRUNK_GET_ATTR: rc = bfad_iocmd_trunk_get_attr(bfad, iocmd); break; case IOCMD_QOS_ENABLE: case IOCMD_QOS_DISABLE: rc = bfad_iocmd_qos(bfad, iocmd, cmd); break; case IOCMD_QOS_GET_ATTR: rc = bfad_iocmd_qos_get_attr(bfad, iocmd); break; case IOCMD_QOS_GET_VC_ATTR: rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd); break; case IOCMD_QOS_GET_STATS: rc = bfad_iocmd_qos_get_stats(bfad, iocmd); break; case IOCMD_QOS_RESET_STATS: rc = bfad_iocmd_qos_reset_stats(bfad, iocmd); break; case IOCMD_VF_GET_STATS: rc = bfad_iocmd_vf_get_stats(bfad, iocmd); break; case IOCMD_VF_RESET_STATS: rc = bfad_iocmd_vf_clr_stats(bfad, iocmd); break; case IOCMD_FCPIM_LUNMASK_ENABLE: case IOCMD_FCPIM_LUNMASK_DISABLE: case IOCMD_FCPIM_LUNMASK_CLEAR: rc = bfad_iocmd_lunmask(bfad, iocmd, cmd); break; case IOCMD_FCPIM_LUNMASK_QUERY: rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd); break; case IOCMD_FCPIM_LUNMASK_ADD: case IOCMD_FCPIM_LUNMASK_DELETE: rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); break; default: rc = -EINVAL; break; } return rc; } static int bfad_im_bsg_vendor_request(struct fc_bsg_job *job) { uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0]; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) job->shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct request_queue *request_q = job->req->q; void *payload_kbuf; int rc = -EINVAL; /* * Set the BSG device request_queue size to 256 to support * payloads larger than 512*1024K bytes. 
*/ blk_queue_max_segments(request_q, 256); /* Allocate a temp buffer to hold the passed in user space command */ payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); if (!payload_kbuf) { rc = -ENOMEM; goto out; } /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */ sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, payload_kbuf, job->request_payload.payload_len); /* Invoke IOCMD handler - to handle all the vendor command requests */ rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf, job->request_payload.payload_len); if (rc != BFA_STATUS_OK) goto error; /* Copy the response data to the job->reply_payload sg_list */ sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, payload_kbuf, job->reply_payload.payload_len); /* free the command buffer */ kfree(payload_kbuf); /* Fill the BSG job reply data */ job->reply_len = job->reply_payload.payload_len; job->reply->reply_payload_rcv_len = job->reply_payload.payload_len; job->reply->result = rc; job->job_done(job); return rc; error: /* free the command buffer */ kfree(payload_kbuf); out: job->reply->result = rc; job->reply_len = sizeof(uint32_t); job->reply->reply_payload_rcv_len = 0; return rc; } /* FC passthru call backs */ u64 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; struct bfa_sge_s *sge; u64 addr; sge = drv_fcxp->req_sge + sgeid; addr = (u64)(size_t) sge->sg_addr; return addr; } u32 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; struct bfa_sge_s *sge; sge = drv_fcxp->req_sge + sgeid; return sge->sg_len; } u64 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; struct bfa_sge_s *sge; u64 addr; sge = drv_fcxp->rsp_sge + sgeid; addr = (u64)(size_t) sge->sg_addr; return addr; } u32 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; struct bfa_sge_s *sge; sge = drv_fcxp->rsp_sge + sgeid; return sge->sg_len; } void bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfad_fcxp *drv_fcxp = bfad_fcxp; drv_fcxp->req_status = req_status; drv_fcxp->rsp_len = rsp_len; /* bfa_fcxp will be automatically freed by BFA */ drv_fcxp->bfa_fcxp = NULL; complete(&drv_fcxp->comp); } struct bfad_buf_info * bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf, uint32_t payload_len, uint32_t *num_sgles) { struct bfad_buf_info *buf_base, *buf_info; struct bfa_sge_s *sg_table; int sge_num = 1; buf_base = kzalloc((sizeof(struct bfad_buf_info) + sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL); if (!buf_base) return NULL; sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) + (sizeof(struct bfad_buf_info) * sge_num)); /* Allocate dma coherent memory */ buf_info = buf_base; buf_info->size = payload_len; buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size, &buf_info->phys, GFP_KERNEL); if (!buf_info->virt) goto out_free_mem; /* copy the linear bsg buffer to buf_info */ memset(buf_info->virt, 0, buf_info->size); memcpy(buf_info->virt, payload_kbuf, buf_info->size); /* * Setup SG table */ sg_table->sg_len = buf_info->size; sg_table->sg_addr = (void *)(size_t) buf_info->phys; *num_sgles = sge_num; return buf_base; out_free_mem: kfree(buf_base); return NULL; } void bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base, uint32_t num_sgles) { int i; 
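	/* Undo bfad_fcxp_map_sg(): free each DMA-coherent buffer, then the
	 * combined buf_info/SG-table allocation itself. */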
struct bfad_buf_info *buf_info = buf_base; if (buf_base) { for (i = 0; i < num_sgles; buf_info++, i++) { if (buf_info->virt != NULL) dma_free_coherent(&bfad->pcidev->dev, buf_info->size, buf_info->virt, buf_info->phys); } kfree(buf_base); } } int bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp, bfa_bsg_fcpt_t *bsg_fcpt) { struct bfa_fcxp_s *hal_fcxp; struct bfad_s *bfad = drv_fcxp->port->bfad; unsigned long flags; uint8_t lp_tag; spin_lock_irqsave(&bfad->bfad_lock, flags); /* Allocate bfa_fcxp structure */ hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa, drv_fcxp->num_req_sgles, drv_fcxp->num_rsp_sgles, bfad_fcxp_get_req_sgaddr_cb, bfad_fcxp_get_req_sglen_cb, bfad_fcxp_get_rsp_sgaddr_cb, bfad_fcxp_get_rsp_sglen_cb); if (!hal_fcxp) { bfa_trc(bfad, 0); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_ENOMEM; } drv_fcxp->bfa_fcxp = hal_fcxp; lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id); bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag, bsg_fcpt->cts, bsg_fcpt->cos, job->request_payload.payload_len, &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad, job->reply_payload.payload_len, bsg_fcpt->tsecs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return BFA_STATUS_OK; } int bfad_im_bsg_els_ct_request(struct fc_bsg_job *job) { struct bfa_bsg_data *bsg_data; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) job->shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; bfa_bsg_fcpt_t *bsg_fcpt; struct bfad_fcxp *drv_fcxp; struct bfa_fcs_lport_s *fcs_port; struct bfa_fcs_rport_s *fcs_rport; uint32_t command_type = job->request->msgcode; unsigned long flags; struct bfad_buf_info *rsp_buf_info; void *req_kbuf = NULL, *rsp_kbuf = NULL; int rc = -EINVAL; job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */ job->reply->reply_payload_rcv_len = 0; /* Get the payload passed in from userspace */ bsg_data = (struct bfa_bsg_data *) (((char *)job->request) + sizeof(struct fc_bsg_request)); if (bsg_data == NULL) goto out; /* * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload * buffer of size bsg_data->payload_len */ bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL); if (!bsg_fcpt) goto out; if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload, bsg_data->payload_len)) { kfree(bsg_fcpt); goto out; } drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL); if (drv_fcxp == NULL) { kfree(bsg_fcpt); rc = -ENOMEM; goto out; } spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id, bsg_fcpt->lpwwn); if (fcs_port == NULL) { bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN; spin_unlock_irqrestore(&bfad->bfad_lock, flags); goto out_free_mem; } /* Check if the port is online before sending FC Passthru cmd */ if (!bfa_fcs_lport_is_online(fcs_port)) { bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE; spin_unlock_irqrestore(&bfad->bfad_lock, flags); goto out_free_mem; } drv_fcxp->port = fcs_port->bfad_port; if (drv_fcxp->port->bfad == 0) drv_fcxp->port->bfad = bfad; /* Fetch the bfa_rport - if nexus needed */ if (command_type == FC_BSG_HST_ELS_NOLOGIN || command_type == FC_BSG_HST_CT) { /* BSG HST commands: no nexus needed */ drv_fcxp->bfa_rport = NULL; } else if (command_type == FC_BSG_RPT_ELS || command_type == FC_BSG_RPT_CT) { /* BSG RPT commands: nexus needed */ fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port, bsg_fcpt->dpwwn); if (fcs_rport == NULL) { bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN; spin_unlock_irqrestore(&bfad->bfad_lock, flags); goto 
out_free_mem; } drv_fcxp->bfa_rport = fcs_rport->bfa_rport; } else { /* Unknown BSG msgcode; return -EINVAL */ spin_unlock_irqrestore(&bfad->bfad_lock, flags); goto out_free_mem; } spin_unlock_irqrestore(&bfad->bfad_lock, flags); /* allocate memory for req / rsp buffers */ req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL); if (!req_kbuf) { printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n", bfad->pci_name); rc = -ENOMEM; goto out_free_mem; } rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL); if (!rsp_kbuf) { printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n", bfad->pci_name); rc = -ENOMEM; goto out_free_mem; } /* map req sg - copy the sg_list passed in to the linear buffer */ sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, req_kbuf, job->request_payload.payload_len); drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf, job->request_payload.payload_len, &drv_fcxp->num_req_sgles); if (!drv_fcxp->reqbuf_info) { printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n", bfad->pci_name); rc = -ENOMEM; goto out_free_mem; } drv_fcxp->req_sge = (struct bfa_sge_s *) (((uint8_t *)drv_fcxp->reqbuf_info) + (sizeof(struct bfad_buf_info) * drv_fcxp->num_req_sgles)); /* map rsp sg */ drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf, job->reply_payload.payload_len, &drv_fcxp->num_rsp_sgles); if (!drv_fcxp->rspbuf_info) { printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n", bfad->pci_name); rc = -ENOMEM; goto out_free_mem; } rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info; drv_fcxp->rsp_sge = (struct bfa_sge_s *) (((uint8_t *)drv_fcxp->rspbuf_info) + (sizeof(struct bfad_buf_info) * drv_fcxp->num_rsp_sgles)); /* fcxp send */ init_completion(&drv_fcxp->comp); rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt); if (rc == BFA_STATUS_OK) { wait_for_completion(&drv_fcxp->comp); bsg_fcpt->status = drv_fcxp->req_status; } else { bsg_fcpt->status = rc; goto out_free_mem; } /* fill the job->reply data */ if (drv_fcxp->req_status == BFA_STATUS_OK) { job->reply_len = drv_fcxp->rsp_len; job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len; job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; } else { job->reply->reply_payload_rcv_len = sizeof(struct fc_bsg_ctels_reply); job->reply_len = sizeof(uint32_t); job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT; } /* Copy the response data to the reply_payload sg list */ sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, (uint8_t *)rsp_buf_info->virt, job->reply_payload.payload_len); out_free_mem: bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info, drv_fcxp->num_rsp_sgles); bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info, drv_fcxp->num_req_sgles); kfree(req_kbuf); kfree(rsp_kbuf); /* Need a copy to user op */ if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt, bsg_data->payload_len)) rc = -EIO; kfree(bsg_fcpt); kfree(drv_fcxp); out: job->reply->result = rc; if (rc == BFA_STATUS_OK) job->job_done(job); return rc; } int bfad_im_bsg_request(struct fc_bsg_job *job) { uint32_t rc = BFA_STATUS_OK; switch (job->request->msgcode) { case FC_BSG_HST_VENDOR: /* Process BSG HST Vendor requests */ rc = bfad_im_bsg_vendor_request(job); break; case FC_BSG_HST_ELS_NOLOGIN: case FC_BSG_RPT_ELS: case FC_BSG_HST_CT: case FC_BSG_RPT_CT: /* Process BSG ELS/CT commands */ rc = bfad_im_bsg_els_ct_request(job); break; default: job->reply->result = rc = -EINVAL; job->reply->reply_payload_rcv_len = 0; break; } return rc; } int 
bfad_im_bsg_timeout(struct fc_bsg_job *job) { /* Don't complete the BSG job request - return -EAGAIN * to reset the bsg job timeout: for ELS/CT pass-thru we * already have a timer to track the request. */ return -EAGAIN; }
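/*
 * bfad_fcxp_map_sg() above returns a single allocation that holds the
 * bfad_buf_info entries followed immediately by the bfa_sge_s entries,
 * which is why bfad_im_bsg_els_ct_request() derives req_sge/rsp_sge by
 * stepping past the buf_info part of the same block:
 *
 *   base: [ buf_info[0] .. buf_info[n-1] ][ sge[0] .. sge[n-1] ]
 *
 * A minimal sketch of that pointer arithmetic, with a hypothetical n_sges
 * count for illustration (the driver above always uses 1):
 */
#if 0
	unsigned int n_sges = 1;	/* hypothetical count for illustration */
	void *base = kzalloc((sizeof(struct bfad_buf_info) +
			      sizeof(struct bfa_sge_s)) * n_sges, GFP_KERNEL);
	struct bfad_buf_info *buf_info = base;
	struct bfa_sge_s *sge_tbl = (struct bfa_sge_s *)
			((uint8_t *)base + sizeof(struct bfad_buf_info) * n_sges);
#endif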
gpl-2.0
iyahman/android_kernel_samsung_jf
drivers/xen/platform-pci.c
4855
4665
/****************************************************************************** * platform-pci.c * * Xen platform PCI device driver * Copyright (c) 2005, Intel Corporation. * Copyright (c) 2007, XenSource Inc. * Copyright (c) 2010, Citrix * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * */ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pci.h> #include <xen/platform_pci.h> #include <xen/grant_table.h> #include <xen/xenbus.h> #include <xen/events.h> #include <xen/hvm.h> #include <xen/xen-ops.h> #define DRV_NAME "xen-platform-pci" MODULE_AUTHOR("ssmith@xensource.com and stefano.stabellini@eu.citrix.com"); MODULE_DESCRIPTION("Xen platform PCI device"); MODULE_LICENSE("GPL"); static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; static uint64_t callback_via; unsigned long alloc_xen_mmio(unsigned long len) { unsigned long addr; addr = platform_mmio + platform_mmio_alloc; platform_mmio_alloc += len; BUG_ON(platform_mmio_alloc > platform_mmiolen); return addr; } static uint64_t get_callback_via(struct pci_dev *pdev) { u8 pin; int irq; irq = pdev->irq; if (irq < 16) return irq; /* ISA IRQ */ pin = pdev->pin; /* We don't know the GSI. Specify the PCI INTx line instead. 
*/ return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */ ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | ((uint64_t)(pin - 1) & 3); } static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id) { xen_hvm_evtchn_do_upcall(); return IRQ_HANDLED; } static int xen_allocate_irq(struct pci_dev *pdev) { return request_irq(pdev->irq, do_hvm_evtchn_intr, IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TRIGGER_RISING, "xen-platform-pci", pdev); } static int platform_pci_resume(struct pci_dev *pdev) { int err; if (xen_have_vector_callback) return 0; err = xen_set_callback_via(callback_via); if (err) { dev_err(&pdev->dev, "platform_pci_resume failure!\n"); return err; } return 0; } static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int i, ret; long ioaddr; long mmio_addr, mmio_len; unsigned int max_nr_gframes; i = pci_enable_device(pdev); if (i) return i; ioaddr = pci_resource_start(pdev, 0); mmio_addr = pci_resource_start(pdev, 1); mmio_len = pci_resource_len(pdev, 1); if (mmio_addr == 0 || ioaddr == 0) { dev_err(&pdev->dev, "no resources found\n"); ret = -ENOENT; goto pci_out; } ret = pci_request_region(pdev, 1, DRV_NAME); if (ret < 0) goto pci_out; ret = pci_request_region(pdev, 0, DRV_NAME); if (ret < 0) goto mem_out; platform_mmio = mmio_addr; platform_mmiolen = mmio_len; if (!xen_have_vector_callback) { ret = xen_allocate_irq(pdev); if (ret) { dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret); goto out; } callback_via = get_callback_via(pdev); ret = xen_set_callback_via(callback_via); if (ret) { dev_warn(&pdev->dev, "Unable to set the evtchn callback " "err=%d\n", ret); goto out; } } max_nr_gframes = gnttab_max_grant_frames(); xen_hvm_resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); ret = gnttab_init(); if (ret) goto out; xenbus_probe(NULL); return 0; out: pci_release_region(pdev, 0); mem_out: pci_release_region(pdev, 1); pci_out: pci_disable_device(pdev); return ret; } static struct pci_device_id platform_pci_tbl[] __devinitdata = { {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; MODULE_DEVICE_TABLE(pci, platform_pci_tbl); static struct pci_driver platform_driver = { .name = DRV_NAME, .probe = platform_pci_init, .id_table = platform_pci_tbl, #ifdef CONFIG_PM .resume_early = platform_pci_resume, #endif }; static int __init platform_pci_module_init(void) { return pci_register_driver(&platform_driver); } module_init(platform_pci_module_init);
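/*
 * The value built by get_callback_via() above packs the PCI INTx identity
 * of the platform device into a single 64-bit word that is then passed to
 * xen_set_callback_via().  A minimal standalone sketch of the same bit
 * layout, using hypothetical example values (domain 0, bus 5, devfn 0x18,
 * pin INTA):
 */
#if 0
static uint64_t example_callback_via(void)
{
	uint64_t domain = 0, bus = 0x05, devfn = 0x18, pin = 1; /* hypothetical */

	return ((uint64_t)0x01 << 56) |		/* type tag: PCI INTx		*/
	       (domain << 32) |			/* PCI domain (segment) number	*/
	       (bus << 16) |			/* bus number			*/
	       ((devfn & 0xff) << 8) |		/* slot/function		*/
	       ((pin - 1) & 3);			/* INTA..INTD encoded as 0..3	*/
}
#endif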
gpl-2.0
CyanogenMod-E1/cafkernel
drivers/staging/keucr/scsiglue.c
5111
12168
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/module.h> #include <linux/mutex.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_devinfo.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include "usb.h" #include "scsiglue.h" #include "transport.h" /* Host functions */ /* * host_info() */ static const char *host_info(struct Scsi_Host *host) { /* pr_info("scsiglue --- host_info\n"); */ return "SCSI emulation for USB Mass Storage devices"; } /* * slave_alloc() */ static int slave_alloc(struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); /* pr_info("scsiglue --- slave_alloc\n"); */ sdev->inquiry_len = 36; blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); if (us->subclass == USB_SC_UFI) sdev->sdev_target->pdt_1f_for_no_lun = 1; return 0; } /* * slave_configure() */ static int slave_configure(struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); /* pr_info("scsiglue --- slave_configure\n"); */ if (us->fflags & (US_FL_MAX_SECTORS_64 | US_FL_MAX_SECTORS_MIN)) { unsigned int max_sectors = 64; if (us->fflags & US_FL_MAX_SECTORS_MIN) max_sectors = PAGE_CACHE_SIZE >> 9; if (queue_max_sectors(sdev->request_queue) > max_sectors) blk_queue_max_hw_sectors(sdev->request_queue, max_sectors); } if (sdev->type == TYPE_DISK) { if (us->subclass != USB_SC_SCSI && us->subclass != USB_SC_CYP_ATACB) sdev->use_10_for_ms = 1; sdev->use_192_bytes_for_3f = 1; if (us->fflags & US_FL_NO_WP_DETECT) sdev->skip_ms_page_3f = 1; sdev->skip_ms_page_8 = 1; if (us->fflags & US_FL_FIX_CAPACITY) sdev->fix_capacity = 1; if (us->fflags & US_FL_CAPACITY_HEURISTICS) sdev->guess_capacity = 1; if (sdev->scsi_level > SCSI_2) sdev->sdev_target->scsi_level = sdev->scsi_level = SCSI_2; sdev->retry_hwerror = 1; sdev->allow_restart = 1; sdev->last_sector_bug = 1; } else { sdev->use_10_for_ms = 1; } if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_CBI) && sdev->scsi_level == SCSI_UNKNOWN) us->max_lun = 0; if (us->fflags & US_FL_NOT_LOCKABLE) sdev->lockable = 0; return 0; } /* This is always called with scsi_lock(host) held */ /* * queuecommand() */ static int queuecommand_lck(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) { struct us_data *us = host_to_us(srb->device->host); /* pr_info("scsiglue --- queuecommand\n"); */ /* check for state-transition errors */ if (us->srb != NULL) { /* pr_info("Error in %s: us->srb = %p\n" __FUNCTION__, us->srb); */ return SCSI_MLQUEUE_HOST_BUSY; } /* fail the command if we are disconnecting */ if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) { pr_info("Fail command during disconnect\n"); srb->result = DID_NO_CONNECT << 16; done(srb); return 0; } /* enqueue the command and wake up the control thread */ srb->scsi_done = done; us->srb = srb; complete(&us->cmnd_ready); return 0; } static DEF_SCSI_QCMD(queuecommand) /*********************************************************************** * Error handling functions ***********************************************************************/ /* Command timeout and abort */ /* * command_abort() */ static int command_abort(struct scsi_cmnd *srb) { struct us_data *us = host_to_us(srb->device->host); /* pr_info("scsiglue --- command_abort\n"); */ scsi_lock(us_to_host(us)); if (us->srb != srb) { scsi_unlock(us_to_host(us)); printk("-- nothing to abort\n"); return FAILED; } set_bit(US_FLIDX_TIMED_OUT, &us->dflags); if (!test_bit(US_FLIDX_RESETTING, &us->dflags)) { set_bit(US_FLIDX_ABORTING, &us->dflags); usb_stor_stop_transport(us); } 
scsi_unlock(us_to_host(us)); /* Wait for the aborted command to finish */ wait_for_completion(&us->notify); return SUCCESS; } /* This invokes the transport reset mechanism to reset the state of the * device. */ /* * device_reset() */ static int device_reset(struct scsi_cmnd *srb) { struct us_data *us = host_to_us(srb->device->host); int result; /* pr_info("scsiglue --- device_reset\n"); */ /* lock the device pointers and do the reset */ mutex_lock(&(us->dev_mutex)); result = us->transport_reset(us); mutex_unlock(&us->dev_mutex); return result < 0 ? FAILED : SUCCESS; } /* * bus_reset() */ static int bus_reset(struct scsi_cmnd *srb) { struct us_data *us = host_to_us(srb->device->host); int result; /* pr_info("scsiglue --- bus_reset\n"); */ result = usb_stor_port_reset(us); return result < 0 ? FAILED : SUCCESS; } /* * usb_stor_report_device_reset() */ void usb_stor_report_device_reset(struct us_data *us) { int i; struct Scsi_Host *host = us_to_host(us); /* pr_info("scsiglue --- usb_stor_report_device_reset\n"); */ scsi_report_device_reset(host, 0, 0); if (us->fflags & US_FL_SCM_MULT_TARG) { for (i = 1; i < host->max_id; ++i) scsi_report_device_reset(host, 0, i); } } /* * usb_stor_report_bus_reset() */ void usb_stor_report_bus_reset(struct us_data *us) { struct Scsi_Host *host = us_to_host(us); /* pr_info("scsiglue --- usb_stor_report_bus_reset\n"); */ scsi_lock(host); scsi_report_bus_reset(host, 0); scsi_unlock(host); } /*********************************************************************** * /proc/scsi/ functions ***********************************************************************/ /* we use this macro to help us write into the buffer */ #undef SPRINTF #define SPRINTF(args...) \ do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0) /* * proc_info() */ static int proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) { struct us_data *us = host_to_us(host); char *pos = buffer; const char *string; /* pr_info("scsiglue --- proc_info\n"); */ if (inout) return length; /* print the controller name */ SPRINTF(" Host scsi%d: usb-storage\n", host->host_no); /* print product, vendor, and serial number strings */ if (us->pusb_dev->manufacturer) string = us->pusb_dev->manufacturer; else if (us->unusual_dev->vendorName) string = us->unusual_dev->vendorName; else string = "Unknown"; SPRINTF(" Vendor: %s\n", string); if (us->pusb_dev->product) string = us->pusb_dev->product; else if (us->unusual_dev->productName) string = us->unusual_dev->productName; else string = "Unknown"; SPRINTF(" Product: %s\n", string); if (us->pusb_dev->serial) string = us->pusb_dev->serial; else string = "None"; SPRINTF("Serial Number: %s\n", string); /* show the protocol and transport */ SPRINTF(" Protocol: %s\n", us->protocol_name); SPRINTF(" Transport: %s\n", us->transport_name); /* show the device flags */ if (pos < buffer + length) { pos += sprintf(pos, " Quirks:"); #define US_FLAG(name, value) \ if (us->fflags & value)\ pos += sprintf(pos, " " #name); US_DO_ALL_FLAGS #undef US_FLAG *(pos++) = '\n'; } /* Calculate start of next buffer, and return value. 
*/ *start = buffer + offset; if ((pos - buffer) < offset) return 0; else if ((pos - buffer - offset) < length) return pos - buffer - offset; else return length; } /*********************************************************************** * Sysfs interface ***********************************************************************/ /* Output routine for the sysfs max_sectors file */ /* * show_max_sectors() */ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); /* pr_info("scsiglue --- ssize_t show_max_sectors\n"); */ return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue)); } /* Input routine for the sysfs max_sectors file */ /* * store_max_sectors() */ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); unsigned short ms; /* pr_info("scsiglue --- ssize_t store_max_sectors\n"); */ if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) { blk_queue_max_hw_sectors(sdev->request_queue, ms); return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(max_sectors, S_IRUGO | S_IWUSR, show_max_sectors, store_max_sectors); static struct device_attribute *sysfs_device_attr_list[] = {&dev_attr_max_sectors, NULL, }; /* this defines our host template, with which we'll allocate hosts */ /* * usb_stor_host_template() */ struct scsi_host_template usb_stor_host_template = { /* basic userland interface stuff */ .name = "eucr-storage", .proc_name = "eucr-storage", .proc_info = proc_info, .info = host_info, /* command interface -- queued only */ .queuecommand = queuecommand, /* error and abort handlers */ .eh_abort_handler = command_abort, .eh_device_reset_handler = device_reset, .eh_bus_reset_handler = bus_reset, /* queue commands only, only one command per LUN */ .can_queue = 1, .cmd_per_lun = 1, /* unknown initiator id */ .this_id = -1, .slave_alloc = slave_alloc, .slave_configure = slave_configure, /* lots of sg segments can be handled */ .sg_tablesize = SG_ALL, /* limit the total size of a transfer to 120 KB */ .max_sectors = 240, /* merge commands... this seems to help performance, but * periodically someone should test to see which setting is more * optimal. 
*/ .use_clustering = 1, /* emulated HBA */ .emulated = 1, /* we do our own delay after a device or bus reset */ .skip_settle_delay = 1, /* sysfs device attributes */ .sdev_attrs = sysfs_device_attr_list, /* module management */ .module = THIS_MODULE }; /* To Report "Illegal Request: Invalid Field in CDB */ unsigned char usb_stor_sense_invalidCDB[18] = { [0] = 0x70, /* current error */ [2] = ILLEGAL_REQUEST, /* Illegal Request = 0x05 */ [7] = 0x0a, /* additional length */ [12] = 0x24 /* Invalid Field in CDB */ }; /*********************************************************************** * Scatter-gather transfer buffer access routines ***********************************************************************/ /* * usb_stor_access_xfer_buf() */ unsigned int usb_stor_access_xfer_buf(struct us_data *us, unsigned char *buffer, unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr, unsigned int *offset, enum xfer_buf_dir dir) { unsigned int cnt; /* pr_info("transport --- usb_stor_access_xfer_buf\n"); */ struct scatterlist *sg = *sgptr; if (!sg) sg = scsi_sglist(srb); cnt = 0; while (cnt < buflen && sg) { struct page *page = sg_page(sg) + ((sg->offset + *offset) >> PAGE_SHIFT); unsigned int poff = (sg->offset + *offset) & (PAGE_SIZE-1); unsigned int sglen = sg->length - *offset; if (sglen > buflen - cnt) { /* Transfer ends within this s-g entry */ sglen = buflen - cnt; *offset += sglen; } else { /* Transfer continues to next s-g entry */ *offset = 0; sg = sg_next(sg); } while (sglen > 0) { unsigned int plen = min(sglen, (unsigned int)PAGE_SIZE - poff); unsigned char *ptr = kmap(page); if (dir == TO_XFER_BUF) memcpy(ptr + poff, buffer + cnt, plen); else memcpy(buffer + cnt, ptr + poff, plen); kunmap(page); /* Start at the beginning of the next page */ poff = 0; ++page; cnt += plen; sglen -= plen; } } *sgptr = sg; /* Return the amount actually transferred */ return cnt; } /* * Store the contents of buffer into srb's transfer * buffer and set the SCSI residue. */ /* * usb_stor_set_xfer_buf() */ void usb_stor_set_xfer_buf(struct us_data *us, unsigned char *buffer, unsigned int buflen, struct scsi_cmnd *srb, unsigned int dir) { unsigned int offset = 0; struct scatterlist *sg = NULL; /* pr_info("transport --- usb_stor_set_xfer_buf\n"); */ /* TO_XFER_BUF = 0, FROM_XFER_BUF = 1 */ buflen = min(buflen, scsi_bufflen(srb)); buflen = usb_stor_access_xfer_buf(us, buffer, buflen, srb, &sg, &offset, dir); if (buflen < scsi_bufflen(srb)) scsi_set_resid(srb, scsi_bufflen(srb) - buflen); }
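/*
 * The return path of proc_info() earlier in this file follows the legacy
 * /proc read convention: 'pos - buffer' is how many bytes were generated on
 * this call, 'offset' is the reader's current position in the virtual file,
 * and 'length' is how much the reader asked for.  The same arithmetic as a
 * minimal standalone sketch ('written' stands in for pos - buffer; the
 * helper name is hypothetical):
 */
#if 0
static int proc_bytes_to_return(int written, int offset, int length)
{
	if (written < offset)		/* nothing generated past the reader's offset */
		return 0;
	if (written - offset < length)	/* only a partial chunk remains                */
		return written - offset;
	return length;			/* a full chunk of 'length' bytes is available */
}
#endif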
gpl-2.0
PaoloW8/kernel_ZOPO
drivers/scsi/wd33c93.c
8183
67224
/* * Copyright (c) 1996 John Shifflett, GeoLog Consulting * john@geolog.com * jshiffle@netcom.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Drew Eckhardt's excellent 'Generic NCR5380' sources from Linux-PC * provided much of the inspiration and some of the code for this * driver. Everything I know about Amiga DMA was gleaned from careful * reading of Hamish Mcdonald's original wd33c93 driver; in fact, I * borrowed shamelessly from all over that source. Thanks Hamish! * * _This_ driver is (I feel) an improvement over the old one in * several respects: * * - Target Disconnection/Reconnection is now supported. Any * system with more than one device active on the SCSI bus * will benefit from this. The driver defaults to what I * call 'adaptive disconnect' - meaning that each command * is evaluated individually as to whether or not it should * be run with the option to disconnect/reselect (if the * device chooses), or as a "SCSI-bus-hog". * * - Synchronous data transfers are now supported. Because of * a few devices that choke after telling the driver that * they can do sync transfers, we don't automatically use * this faster protocol - it can be enabled via the command- * line on a device-by-device basis. * * - Runtime operating parameters can now be specified through * the 'amiboot' or the 'insmod' command line. For amiboot do: * "amiboot [usual stuff] wd33c93=blah,blah,blah" * The defaults should be good for most people. See the comment * for 'setup_strings' below for more details. * * - The old driver relied exclusively on what the Western Digital * docs call "Combination Level 2 Commands", which are a great * idea in that the CPU is relieved of a lot of interrupt * overhead. However, by accepting a certain (user-settable) * amount of additional interrupts, this driver achieves * better control over the SCSI bus, and data transfers are * almost as fast while being much easier to define, track, * and debug. * * * TODO: * more speed. linked commands. * * * People with bug reports, wish-lists, complaints, comments, * or improvements are asked to pah-leeez email me (John Shifflett) * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get * this thing into as good a shape as possible, and I'm positive * there are lots of lurking bugs and "Stupid Places". * * Updates: * * Added support for pre -A chips, which don't have advanced features * and will generate CSR_RESEL rather than CSR_RESEL_AM. * Richard Hirst <richard@sleepie.demon.co.uk> August 2000 * * Added support for Burst Mode DMA and Fast SCSI. Enabled the use of * default_sx_per for asynchronous data transfers. Added adjustment * of transfer periods in sx_table to the actual input-clock. 
* peter fuerst <post@pfrst.de> February 2007 */ #include <linux/module.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/blkdev.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <asm/irq.h> #include "wd33c93.h" #define optimum_sx_per(hostdata) (hostdata)->sx_table[1].period_ns #define WD33C93_VERSION "1.26++" #define WD33C93_DATE "10/Feb/2007" MODULE_AUTHOR("John Shifflett"); MODULE_DESCRIPTION("Generic WD33C93 SCSI driver"); MODULE_LICENSE("GPL"); /* * 'setup_strings' is a single string used to pass operating parameters and * settings from the kernel/module command-line to the driver. 'setup_args[]' * is an array of strings that define the compile-time default values for * these settings. If Linux boots with an amiboot or insmod command-line, * those settings are combined with 'setup_args[]'. Note that amiboot * command-lines are prefixed with "wd33c93=" while insmod uses a * "setup_strings=" prefix. The driver recognizes the following keywords * (lower case required) and arguments: * * - nosync:bitmask -bitmask is a byte where the 1st 7 bits correspond with * the 7 possible SCSI devices. Set a bit to negotiate for * asynchronous transfers on that device. To maintain * backwards compatibility, a command-line such as * "wd33c93=255" will be automatically translated to * "wd33c93=nosync:0xff". * - nodma:x -x = 1 to disable DMA, x = 0 to enable it. Argument is * optional - if not present, same as "nodma:1". * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer * period. Default is 500; acceptable values are 250 - 1000. * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them. * x = 1 does 'adaptive' disconnects, which is the default * and generally the best choice. * - debug:x -If 'DEBUGGING_ON' is defined, x is a bit mask that causes * various types of debug output to printed - see the DB_xxx * defines in wd33c93.h * - clock:x -x = clock input in MHz for WD33c93 chip. Normal values * would be from 8 through 20. Default is 8. * - burst:x -x = 1 to use Burst Mode (or Demand-Mode) DMA, x = 0 to use * Single Byte DMA, which is the default. Argument is * optional - if not present, same as "burst:1". * - fast:x -x = 1 to enable Fast SCSI, which is only effective with * input-clock divisor 4 (WD33C93_FS_16_20), x = 0 to disable * it, which is the default. Argument is optional - if not * present, same as "fast:1". * - next -No argument. Used to separate blocks of keywords when * there's more than one host adapter in the system. * * Syntax Notes: * - Numeric arguments can be decimal or the '0x' form of hex notation. There * _must_ be a colon between a keyword and its numeric argument, with no * spaces. * - Keywords are separated by commas, no spaces, in the standard kernel * command-line manner. * - A keyword in the 'nth' comma-separated command-line member will overwrite * the 'nth' element of setup_args[]. A blank command-line member (in * other words, a comma with no preceding keyword) will _not_ overwrite * the corresponding setup_args[] element. * - If a keyword is used more than once, the first one applies to the first * SCSI host found, the second to the second card, etc, unless the 'next' * keyword is used to change the order. 
* * Some amiboot examples (for insmod, use 'setup_strings' instead of 'wd33c93'): * - wd33c93=nosync:255 * - wd33c93=nodma * - wd33c93=nodma:1 * - wd33c93=disconnect:2,nosync:0x08,period:250 * - wd33c93=debug:0x1c */ /* Normally, no defaults are specified */ static char *setup_args[] = { "", "", "", "", "", "", "", "", "", "" }; static char *setup_strings; module_param(setup_strings, charp, 0); static void wd33c93_execute(struct Scsi_Host *instance); #ifdef CONFIG_WD33C93_PIO static inline uchar read_wd33c93(const wd33c93_regs regs, uchar reg_num) { uchar data; outb(reg_num, regs.SASR); data = inb(regs.SCMD); return data; } static inline unsigned long read_wd33c93_count(const wd33c93_regs regs) { unsigned long value; outb(WD_TRANSFER_COUNT_MSB, regs.SASR); value = inb(regs.SCMD) << 16; value |= inb(regs.SCMD) << 8; value |= inb(regs.SCMD); return value; } static inline uchar read_aux_stat(const wd33c93_regs regs) { return inb(regs.SASR); } static inline void write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value) { outb(reg_num, regs.SASR); outb(value, regs.SCMD); } static inline void write_wd33c93_count(const wd33c93_regs regs, unsigned long value) { outb(WD_TRANSFER_COUNT_MSB, regs.SASR); outb((value >> 16) & 0xff, regs.SCMD); outb((value >> 8) & 0xff, regs.SCMD); outb( value & 0xff, regs.SCMD); } #define write_wd33c93_cmd(regs, cmd) \ write_wd33c93((regs), WD_COMMAND, (cmd)) static inline void write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[]) { int i; outb(WD_CDB_1, regs.SASR); for (i=0; i<len; i++) outb(cmnd[i], regs.SCMD); } #else /* CONFIG_WD33C93_PIO */ static inline uchar read_wd33c93(const wd33c93_regs regs, uchar reg_num) { *regs.SASR = reg_num; mb(); return (*regs.SCMD); } static unsigned long read_wd33c93_count(const wd33c93_regs regs) { unsigned long value; *regs.SASR = WD_TRANSFER_COUNT_MSB; mb(); value = *regs.SCMD << 16; value |= *regs.SCMD << 8; value |= *regs.SCMD; mb(); return value; } static inline uchar read_aux_stat(const wd33c93_regs regs) { return *regs.SASR; } static inline void write_wd33c93(const wd33c93_regs regs, uchar reg_num, uchar value) { *regs.SASR = reg_num; mb(); *regs.SCMD = value; mb(); } static void write_wd33c93_count(const wd33c93_regs regs, unsigned long value) { *regs.SASR = WD_TRANSFER_COUNT_MSB; mb(); *regs.SCMD = value >> 16; *regs.SCMD = value >> 8; *regs.SCMD = value; mb(); } static inline void write_wd33c93_cmd(const wd33c93_regs regs, uchar cmd) { *regs.SASR = WD_COMMAND; mb(); *regs.SCMD = cmd; mb(); } static inline void write_wd33c93_cdb(const wd33c93_regs regs, uint len, uchar cmnd[]) { int i; *regs.SASR = WD_CDB_1; for (i = 0; i < len; i++) *regs.SCMD = cmnd[i]; } #endif /* CONFIG_WD33C93_PIO */ static inline uchar read_1_byte(const wd33c93_regs regs) { uchar asr; uchar x = 0; write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO | 0x80); do { asr = read_aux_stat(regs); if (asr & ASR_DBR) x = read_wd33c93(regs, WD_DATA); } while (!(asr & ASR_INT)); return x; } static int round_period(unsigned int period, const struct sx_period *sx_table) { int x; for (x = 1; sx_table[x].period_ns; x++) { if ((period <= sx_table[x - 0].period_ns) && (period > sx_table[x - 1].period_ns)) { return x; } } return 7; } /* * Calculate Synchronous Transfer Register value from SDTR code. 
*/ static uchar calc_sync_xfer(unsigned int period, unsigned int offset, unsigned int fast, const struct sx_period *sx_table) { /* When doing Fast SCSI synchronous data transfers, the corresponding * value in 'sx_table' is two times the actually used transfer period. */ uchar result; if (offset && fast) { fast = STR_FSS; period *= 2; } else { fast = 0; } period *= 4; /* convert SDTR code to ns */ result = sx_table[round_period(period,sx_table)].reg_value; result |= (offset < OPTIMUM_SX_OFF) ? offset : OPTIMUM_SX_OFF; result |= fast; return result; } /* * Calculate SDTR code bytes [3],[4] from period and offset. */ static inline void calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast, uchar msg[2]) { /* 'period' is a "normal"-mode value, like the ones in 'sx_table'. The * actually used transfer period for Fast SCSI synchronous data * transfers is half that value. */ period /= 4; if (offset && fast) period /= 2; msg[0] = period; msg[1] = offset; } static int wd33c93_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct WD33C93_hostdata *hostdata; struct scsi_cmnd *tmp; hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata; DB(DB_QUEUE_COMMAND, printk("Q-%d-%02x( ", cmd->device->id, cmd->cmnd[0])) /* Set up a few fields in the scsi_cmnd structure for our own use: * - host_scribble is the pointer to the next cmd in the input queue * - scsi_done points to the routine we call when a cmd is finished * - result is what you'd expect */ cmd->host_scribble = NULL; cmd->scsi_done = done; cmd->result = 0; /* We use the Scsi_Pointer structure that's included with each command * as a scratchpad (as it's intended to be used!). The handy thing about * the SCp.xxx fields is that they're always associated with a given * cmd, and are preserved across disconnect-reselect. This means we * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages * if we keep all the critical pointers and counters in SCp: * - SCp.ptr is the pointer into the RAM buffer * - SCp.this_residual is the size of that buffer * - SCp.buffer points to the current scatter-gather buffer * - SCp.buffers_residual tells us how many S.G. buffers there are * - SCp.have_data_in is not used * - SCp.sent_command is not used * - SCp.phase records this command's SRCID_ER bit setting */ if (scsi_bufflen(cmd)) { cmd->SCp.buffer = scsi_sglist(cmd); cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); cmd->SCp.this_residual = cmd->SCp.buffer->length; } else { cmd->SCp.buffer = NULL; cmd->SCp.buffers_residual = 0; cmd->SCp.ptr = NULL; cmd->SCp.this_residual = 0; } /* WD docs state that at the conclusion of a "LEVEL2" command, the * status byte can be retrieved from the LUN register. Apparently, * this is the case only for *uninterrupted* LEVEL2 commands! If * there are any unexpected phases entered, even if they are 100% * legal (different devices may choose to do things differently), * the LEVEL2 command sequence is exited. This often occurs prior * to receiving the status byte, in which case the driver does a * status phase interrupt and gets the status byte on its own. * While such a command can then be "resumed" (ie restarted to * finish up as a LEVEL2 command), the LUN register will NOT be * a valid status byte at the command's conclusion, and we must * use the byte obtained during the earlier interrupt. 
Here, we * preset SCp.Status to an illegal value (0xff) so that when * this command finally completes, we can tell where the actual * status byte is stored. */ cmd->SCp.Status = ILLEGAL_STATUS_BYTE; /* * Add the cmd to the end of 'input_Q'. Note that REQUEST SENSE * commands are added to the head of the queue so that the desired * sense data is not lost before REQUEST_SENSE executes. */ spin_lock_irq(&hostdata->lock); if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) { cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } else { /* find the end of the queue */ for (tmp = (struct scsi_cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (struct scsi_cmnd *) tmp->host_scribble) ; tmp->host_scribble = (uchar *) cmd; } /* We know that there's at least one command in 'input_Q' now. * Go see if any of them are runnable! */ wd33c93_execute(cmd->device->host); DB(DB_QUEUE_COMMAND, printk(")Q ")) spin_unlock_irq(&hostdata->lock); return 0; } DEF_SCSI_QCMD(wd33c93_queuecommand) /* * This routine attempts to start a scsi command. If the host_card is * already connected, we give up immediately. Otherwise, look through * the input_Q, using the first command we find that's intended * for a currently non-busy target/lun. * * wd33c93_execute() is always called with interrupts disabled or from * the wd33c93_intr itself, which means that a wd33c93 interrupt * cannot occur while we are in here. */ static void wd33c93_execute(struct Scsi_Host *instance) { struct WD33C93_hostdata *hostdata = (struct WD33C93_hostdata *) instance->hostdata; const wd33c93_regs regs = hostdata->regs; struct scsi_cmnd *cmd, *prev; DB(DB_EXECUTE, printk("EX(")) if (hostdata->selecting || hostdata->connected) { DB(DB_EXECUTE, printk(")EX-0 ")) return; } /* * Search through the input_Q for a command destined * for an idle target/lun. */ cmd = (struct scsi_cmnd *) hostdata->input_Q; prev = NULL; while (cmd) { if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))) break; prev = cmd; cmd = (struct scsi_cmnd *) cmd->host_scribble; } /* quit if queue empty or all possible targets are busy */ if (!cmd) { DB(DB_EXECUTE, printk(")EX-1 ")) return; } /* remove command from queue */ if (prev) prev->host_scribble = cmd->host_scribble; else hostdata->input_Q = (struct scsi_cmnd *) cmd->host_scribble; #ifdef PROC_STATISTICS hostdata->cmd_cnt[cmd->device->id]++; #endif /* * Start the selection process */ if (cmd->sc_data_direction == DMA_TO_DEVICE) write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id); else write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD); /* Now we need to figure out whether or not this command is a good * candidate for disconnect/reselect. We guess to the best of our * ability, based on a set of hierarchical rules. When several * devices are operating simultaneously, disconnects are usually * an advantage. In a single device system, or if only 1 device * is being accessed, transfers usually go faster if disconnects * are not allowed: * * + Commands should NEVER disconnect if hostdata->disconnect = * DIS_NEVER (this holds for tape drives also), and ALWAYS * disconnect if hostdata->disconnect = DIS_ALWAYS. * + Tape drive commands should always be allowed to disconnect. * + Disconnect should be allowed if disconnected_Q isn't empty. * + Commands should NOT disconnect if input_Q is empty. * + Disconnect should be allowed if there are commands in input_Q * for a different target/lun. In this case, the other commands * should be made disconnect-able, if not already. 
* * I know, I know - this code would flunk me out of any * "C Programming 101" class ever offered. But it's easy * to change around and experiment with for now. */ cmd->SCp.phase = 0; /* assume no disconnect */ if (hostdata->disconnect == DIS_NEVER) goto no; if (hostdata->disconnect == DIS_ALWAYS) goto yes; if (cmd->device->type == 1) /* tape drive? */ goto yes; if (hostdata->disconnected_Q) /* other commands disconnected? */ goto yes; if (!(hostdata->input_Q)) /* input_Q empty? */ goto no; for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev; prev = (struct scsi_cmnd *) prev->host_scribble) { if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) { for (prev = (struct scsi_cmnd *) hostdata->input_Q; prev; prev = (struct scsi_cmnd *) prev->host_scribble) prev->SCp.phase = 1; goto yes; } } goto no; yes: cmd->SCp.phase = 1; #ifdef PROC_STATISTICS hostdata->disc_allowed_cnt[cmd->device->id]++; #endif no: write_wd33c93(regs, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0)); write_wd33c93(regs, WD_TARGET_LUN, cmd->device->lun); write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]); hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); if ((hostdata->level2 == L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) { /* * Do a 'Select-With-ATN' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * CSR_SELECT: success - proceed. */ hostdata->selecting = cmd; /* Every target has its own synchronous transfer setting, kept in the * sync_xfer array, and a corresponding status byte in sync_stat[]. * Each target's sync_stat[] entry is initialized to SX_UNSET, and its * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET * means that the parameters are undetermined as yet, and that we * need to send an SDTR message to this device after selection is * complete: We set SS_FIRST to tell the interrupt routine to do so. * If we've been asked not to try synchronous transfers on this * target (and _all_ luns within it), we'll still send the SDTR message * later, but at that time we'll negotiate for async by specifying a * sync fifo depth of 0. */ if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) hostdata->sync_stat[cmd->device->id] = SS_FIRST; hostdata->state = S_SELECTING; write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */ write_wd33c93_cmd(regs, WD_CMD_SEL_ATN); } else { /* * Do a 'Select-With-ATN-Xfer' command. This will end with * one of the following interrupts: * CSR_RESEL_AM: failure - can try again later. * CSR_TIMEOUT: failure - give up. * anything else: success - proceed. */ hostdata->connected = cmd; write_wd33c93(regs, WD_COMMAND_PHASE, 0); /* copy command_descriptor_block into WD chip * (take advantage of auto-incrementing) */ write_wd33c93_cdb(regs, cmd->cmd_len, cmd->cmnd); /* The wd33c93 only knows about Group 0, 1, and 5 commands when * it's doing a 'select-and-transfer'. To be safe, we write the * size of the CDB into the OWN_ID register for every case. This * way there won't be problems with vendor-unique, audio, etc. */ write_wd33c93(regs, WD_OWN_ID, cmd->cmd_len); /* When doing a non-disconnect command with DMA, we can save * ourselves a DATA phase interrupt later by setting everything * up ahead of time. */ if ((cmd->SCp.phase == 0) && (hostdata->no_dma == 0)) { if (hostdata->dma_setup(cmd, (cmd->sc_data_direction == DMA_TO_DEVICE) ? 
DATA_OUT_DIR : DATA_IN_DIR)) write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */ else { write_wd33c93_count(regs, cmd->SCp.this_residual); write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | hostdata->dma_mode); hostdata->dma = D_DMA_RUNNING; } } else write_wd33c93_count(regs, 0); /* guarantee a DATA_PHASE interrupt */ hostdata->state = S_RUNNING_LEVEL2; write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); } /* * Since the SCSI bus can handle only 1 connection at a time, * we get out of here now. If the selection fails, or when * the command disconnects, we'll come back to this routine * to search the input_Q again... */ DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : "")) } static void transfer_pio(const wd33c93_regs regs, uchar * buf, int cnt, int data_in_dir, struct WD33C93_hostdata *hostdata) { uchar asr; DB(DB_TRANSFER, printk("(%p,%d,%s:", buf, cnt, data_in_dir ? "in" : "out")) write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_wd33c93_count(regs, cnt); write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO); if (data_in_dir) { do { asr = read_aux_stat(regs); if (asr & ASR_DBR) *buf++ = read_wd33c93(regs, WD_DATA); } while (!(asr & ASR_INT)); } else { do { asr = read_aux_stat(regs); if (asr & ASR_DBR) write_wd33c93(regs, WD_DATA, *buf++); } while (!(asr & ASR_INT)); } /* Note: we are returning with the interrupt UN-cleared. * Since (presumably) an entire I/O operation has * completed, the bus phase is probably different, and * the interrupt routine will discover this when it * responds to the uncleared int. */ } static void transfer_bytes(const wd33c93_regs regs, struct scsi_cmnd *cmd, int data_in_dir) { struct WD33C93_hostdata *hostdata; unsigned long length; hostdata = (struct WD33C93_hostdata *) cmd->device->host->hostdata; /* Normally, you'd expect 'this_residual' to be non-zero here. * In a series of scatter-gather transfers, however, this * routine will usually be called with 'this_residual' equal * to 0 and 'buffers_residual' non-zero. This means that a * previous transfer completed, clearing 'this_residual', and * now we need to setup the next scatter-gather buffer as the * source or destination for THIS transfer. */ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { ++cmd->SCp.buffer; --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); } if (!cmd->SCp.this_residual) /* avoid bogus setups */ return; write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]); /* 'hostdata->no_dma' is TRUE if we don't even want to try DMA. * Update 'this_residual' and 'ptr' after 'transfer_pio()' returns. */ if (hostdata->no_dma || hostdata->dma_setup(cmd, data_in_dir)) { #ifdef PROC_STATISTICS hostdata->pio_cnt++; #endif transfer_pio(regs, (uchar *) cmd->SCp.ptr, cmd->SCp.this_residual, data_in_dir, hostdata); length = cmd->SCp.this_residual; cmd->SCp.this_residual = read_wd33c93_count(regs); cmd->SCp.ptr += (length - cmd->SCp.this_residual); } /* We are able to do DMA (in fact, the Amiga hardware is * already going!), so start up the wd33c93 in DMA mode. * We set 'hostdata->dma' = D_DMA_RUNNING so that when the * transfer completes and causes an interrupt, we're * reminded to tell the Amiga to shut down its end. We'll * postpone the updating of 'this_residual' and 'ptr' * until then. 
*/ else { #ifdef PROC_STATISTICS hostdata->dma_cnt++; #endif write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | hostdata->dma_mode); write_wd33c93_count(regs, cmd->SCp.this_residual); if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) { write_wd33c93(regs, WD_COMMAND_PHASE, 0x45); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else write_wd33c93_cmd(regs, WD_CMD_TRANS_INFO); hostdata->dma = D_DMA_RUNNING; } } void wd33c93_intr(struct Scsi_Host *instance) { struct WD33C93_hostdata *hostdata = (struct WD33C93_hostdata *) instance->hostdata; const wd33c93_regs regs = hostdata->regs; struct scsi_cmnd *patch, *cmd; uchar asr, sr, phs, id, lun, *ucp, msg; unsigned long length, flags; asr = read_aux_stat(regs); if (!(asr & ASR_INT) || (asr & ASR_BSY)) return; spin_lock_irqsave(&hostdata->lock, flags); #ifdef PROC_STATISTICS hostdata->int_cnt++; #endif cmd = (struct scsi_cmnd *) hostdata->connected; /* assume we're connected */ sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear the interrupt */ phs = read_wd33c93(regs, WD_COMMAND_PHASE); DB(DB_INTR, printk("{%02x:%02x-", asr, sr)) /* After starting a DMA transfer, the next interrupt * is guaranteed to be in response to completion of * the transfer. Since the Amiga DMA hardware runs in * in an open-ended fashion, it needs to be told when * to stop; do that here if D_DMA_RUNNING is true. * Also, we have to update 'this_residual' and 'ptr' * based on the contents of the TRANSFER_COUNT register, * in case the device decided to do an intermediate * disconnect (a device may do this if it has to do a * seek, or just to be nice and let other devices have * some bus time during long transfers). After doing * whatever is needed, we go on and service the WD3393 * interrupt normally. */ if (hostdata->dma == D_DMA_RUNNING) { DB(DB_TRANSFER, printk("[%p/%d:", cmd->SCp.ptr, cmd->SCp.this_residual)) hostdata->dma_stop(cmd->device->host, cmd, 1); hostdata->dma = D_DMA_OFF; length = cmd->SCp.this_residual; cmd->SCp.this_residual = read_wd33c93_count(regs); cmd->SCp.ptr += (length - cmd->SCp.this_residual); DB(DB_TRANSFER, printk("%p/%d]", cmd->SCp.ptr, cmd->SCp.this_residual)) } /* Respond to the specific WD3393 interrupt - there are quite a few! */ switch (sr) { case CSR_TIMEOUT: DB(DB_INTR, printk("TIMEOUT")) if (hostdata->state == S_RUNNING_LEVEL2) hostdata->connected = NULL; else { cmd = (struct scsi_cmnd *) hostdata->selecting; /* get a valid cmd */ hostdata->selecting = NULL; } cmd->result = DID_NO_CONNECT << 16; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; cmd->scsi_done(cmd); /* From esp.c: * There is a window of time within the scsi_done() path * of execution where interrupts are turned back on full * blast and left that way. During that time we could * reconnect to a disconnected command, then we'd bomb * out below. We could also end up executing two commands * at _once_. ...just so you know why the restore_flags() * is here... */ spin_unlock_irqrestore(&hostdata->lock, flags); /* We are not connected to a target - check to see if there * are commands waiting to be executed. 
*/ wd33c93_execute(instance); break; /* Note: this interrupt should not occur in a LEVEL2 command */ case CSR_SELECT: DB(DB_INTR, printk("SELECT")) hostdata->connected = cmd = (struct scsi_cmnd *) hostdata->selecting; hostdata->selecting = NULL; /* construct an IDENTIFY message with correct disconnect bit */ hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun); if (cmd->SCp.phase) hostdata->outgoing_msg[0] |= 0x40; if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) { hostdata->sync_stat[cmd->device->id] = SS_WAITING; /* Tack on a 2nd message to ask about synchronous transfers. If we've * been asked to do only asynchronous transfers on this device, we * request a fifo depth of 0, which is equivalent to async - should * solve the problems some people have had with GVP's Guru ROM. */ hostdata->outgoing_msg[1] = EXTENDED_MESSAGE; hostdata->outgoing_msg[2] = 3; hostdata->outgoing_msg[3] = EXTENDED_SDTR; if (hostdata->no_sync & (1 << cmd->device->id)) { calc_sync_msg(hostdata->default_sx_per, 0, 0, hostdata->outgoing_msg + 4); } else { calc_sync_msg(optimum_sx_per(hostdata), OPTIMUM_SX_OFF, hostdata->fast, hostdata->outgoing_msg + 4); } hostdata->outgoing_len = 6; #ifdef SYNC_DEBUG ucp = hostdata->outgoing_msg + 1; printk(" sending SDTR %02x03%02x%02x%02x ", ucp[0], ucp[2], ucp[3], ucp[4]); #endif } else hostdata->outgoing_len = 1; hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_DATA_IN: case CSR_UNEXP | PHS_DATA_IN: case CSR_SRV_REQ | PHS_DATA_IN: DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual)) transfer_bytes(regs, cmd, DATA_IN_DIR); if (hostdata->state != S_RUNNING_LEVEL2) hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_DATA_OUT: case CSR_UNEXP | PHS_DATA_OUT: case CSR_SRV_REQ | PHS_DATA_OUT: DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual)) transfer_bytes(regs, cmd, DATA_OUT_DIR); if (hostdata->state != S_RUNNING_LEVEL2) hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; /* Note: this interrupt should not occur in a LEVEL2 command */ case CSR_XFER_DONE | PHS_COMMAND: case CSR_UNEXP | PHS_COMMAND: case CSR_SRV_REQ | PHS_COMMAND: DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0])) transfer_pio(regs, cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata); hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_STATUS: case CSR_UNEXP | PHS_STATUS: case CSR_SRV_REQ | PHS_STATUS: DB(DB_INTR, printk("STATUS=")) cmd->SCp.Status = read_1_byte(regs); DB(DB_INTR, printk("%02x", cmd->SCp.Status)) if (hostdata->level2 >= L2_BASIC) { sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */ udelay(7); hostdata->state = S_RUNNING_LEVEL2; write_wd33c93(regs, WD_COMMAND_PHASE, 0x50); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); } else { hostdata->state = S_CONNECTED; } spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_MESS_IN: case CSR_UNEXP | PHS_MESS_IN: case CSR_SRV_REQ | PHS_MESS_IN: DB(DB_INTR, printk("MSG_IN=")) msg = read_1_byte(regs); sr = read_wd33c93(regs, WD_SCSI_STATUS); /* clear interrupt */ udelay(7); hostdata->incoming_msg[hostdata->incoming_ptr] = msg; if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE) msg = EXTENDED_MESSAGE; else hostdata->incoming_ptr = 0; cmd->SCp.Message = msg; switch (msg) { case COMMAND_COMPLETE: DB(DB_INTR, printk("CCMP")) write_wd33c93_cmd(regs, 
WD_CMD_NEGATE_ACK); hostdata->state = S_PRE_CMP_DISC; break; case SAVE_POINTERS: DB(DB_INTR, printk("SDP")) write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; case RESTORE_POINTERS: DB(DB_INTR, printk("RDP")) if (hostdata->level2 >= L2_BASIC) { write_wd33c93(regs, WD_COMMAND_PHASE, 0x45); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else { write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; } break; case DISCONNECT: DB(DB_INTR, printk("DIS")) cmd->device->disconnect = 1; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_PRE_TMP_DISC; break; case MESSAGE_REJECT: DB(DB_INTR, printk("REJ")) #ifdef SYNC_DEBUG printk("-REJ-"); #endif if (hostdata->sync_stat[cmd->device->id] == SS_WAITING) { hostdata->sync_stat[cmd->device->id] = SS_SET; /* we want default_sx_per, not DEFAULT_SX_PER */ hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0, 0, hostdata->sx_table); } write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; case EXTENDED_MESSAGE: DB(DB_INTR, printk("EXT")) ucp = hostdata->incoming_msg; #ifdef SYNC_DEBUG printk("%02x", ucp[hostdata->incoming_ptr]); #endif /* Is this the last byte of the extended message? */ if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) { switch (ucp[2]) { /* what's the EXTENDED code? */ case EXTENDED_SDTR: /* default to default async period */ id = calc_sync_xfer(hostdata-> default_sx_per / 4, 0, 0, hostdata->sx_table); if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) { /* A device has sent an unsolicited SDTR message; rather than go * through the effort of decoding it and then figuring out what * our reply should be, we're just gonna say that we have a * synchronous fifo depth of 0. This will result in asynchronous * transfers - not ideal but so much easier. * Actually, this is OK because it assures us that if we don't * specifically ask for sync transfers, we won't do any. */ write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ hostdata->outgoing_msg[0] = EXTENDED_MESSAGE; hostdata->outgoing_msg[1] = 3; hostdata->outgoing_msg[2] = EXTENDED_SDTR; calc_sync_msg(hostdata-> default_sx_per, 0, 0, hostdata->outgoing_msg + 3); hostdata->outgoing_len = 5; } else { if (ucp[4]) /* well, sync transfer */ id = calc_sync_xfer(ucp[3], ucp[4], hostdata->fast, hostdata->sx_table); else if (ucp[3]) /* very unlikely... */ id = calc_sync_xfer(ucp[3], ucp[4], 0, hostdata->sx_table); } hostdata->sync_xfer[cmd->device->id] = id; #ifdef SYNC_DEBUG printk(" sync_xfer=%02x\n", hostdata->sync_xfer[cmd->device->id]); #endif hostdata->sync_stat[cmd->device->id] = SS_SET; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; case EXTENDED_WDTR: write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ printk("sending WDTR "); hostdata->outgoing_msg[0] = EXTENDED_MESSAGE; hostdata->outgoing_msg[1] = 2; hostdata->outgoing_msg[2] = EXTENDED_WDTR; hostdata->outgoing_msg[3] = 0; /* 8 bit transfer width */ hostdata->outgoing_len = 4; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; default: write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ printk ("Rejecting Unknown Extended Message(%02x). 
", ucp[2]); hostdata->outgoing_msg[0] = MESSAGE_REJECT; hostdata->outgoing_len = 1; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; break; } hostdata->incoming_ptr = 0; } /* We need to read more MESS_IN bytes for the extended message */ else { hostdata->incoming_ptr++; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; } break; default: printk("Rejecting Unknown Message(%02x) ", msg); write_wd33c93_cmd(regs, WD_CMD_ASSERT_ATN); /* want MESS_OUT */ hostdata->outgoing_msg[0] = MESSAGE_REJECT; hostdata->outgoing_len = 1; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); hostdata->state = S_CONNECTED; } spin_unlock_irqrestore(&hostdata->lock, flags); break; /* Note: this interrupt will occur only after a LEVEL2 command */ case CSR_SEL_XFER_DONE: /* Make sure that reselection is enabled at this point - it may * have been turned off for the command that just completed. */ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); if (phs == 0x60) { DB(DB_INTR, printk("SX-DONE")) cmd->SCp.Message = COMMAND_COMPLETE; lun = read_wd33c93(regs, WD_TARGET_LUN); DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun)) hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE) cmd->SCp.Status = lun; if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD) cmd->result = (cmd-> result & 0x00ffff) | (DID_ERROR << 16); else cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); cmd->scsi_done(cmd); /* We are no longer connected to a target - check to see if * there are commands waiting to be executed. */ spin_unlock_irqrestore(&hostdata->lock, flags); wd33c93_execute(instance); } else { printk ("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs); spin_unlock_irqrestore(&hostdata->lock, flags); } break; /* Note: this interrupt will occur only after a LEVEL2 command */ case CSR_SDP: DB(DB_INTR, printk("SDP")) hostdata->state = S_RUNNING_LEVEL2; write_wd33c93(regs, WD_COMMAND_PHASE, 0x41); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_XFER_DONE | PHS_MESS_OUT: case CSR_UNEXP | PHS_MESS_OUT: case CSR_SRV_REQ | PHS_MESS_OUT: DB(DB_INTR, printk("MSG_OUT=")) /* To get here, we've probably requested MESSAGE_OUT and have * already put the correct bytes in outgoing_msg[] and filled * in outgoing_len. We simply send them out to the SCSI bus. * Sometimes we get MESSAGE_OUT phase when we're not expecting * it - like when our SDTR message is rejected by a target. Some * targets send the REJECT before receiving all of the extended * message, and then seem to go back to MESSAGE_OUT for a byte * or two. Not sure why, or if I'm doing something wrong to * cause this to happen. Regardless, it seems that sending * NOP messages in these situations results in no harm and * makes everyone happy. */ if (hostdata->outgoing_len == 0) { hostdata->outgoing_len = 1; hostdata->outgoing_msg[0] = NOP; } transfer_pio(regs, hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata); DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0])) hostdata->outgoing_len = 0; hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; case CSR_UNEXP_DISC: /* I think I've seen this after a request-sense that was in response * to an error condition, but not sure. We certainly need to do * something when we get this interrupt - the question is 'what?'. 
* Let's think positively, and assume some command has finished * in a legal manner (like a command that provokes a request-sense), * so we treat it as a normal command-complete-disconnect. */ /* Make sure that reselection is enabled at this point - it may * have been turned off for the command that just completed. */ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); if (cmd == NULL) { printk(" - Already disconnected! "); hostdata->state = S_UNCONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); return; } DB(DB_INTR, printk("UNEXP_DISC")) hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD) cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); else cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); cmd->scsi_done(cmd); /* We are no longer connected to a target - check to see if * there are commands waiting to be executed. */ /* look above for comments on scsi_done() */ spin_unlock_irqrestore(&hostdata->lock, flags); wd33c93_execute(instance); break; case CSR_DISC: /* Make sure that reselection is enabled at this point - it may * have been turned off for the command that just completed. */ write_wd33c93(regs, WD_SOURCE_ID, SRCID_ER); DB(DB_INTR, printk("DISC")) if (cmd == NULL) { printk(" - Already disconnected! "); hostdata->state = S_UNCONNECTED; } switch (hostdata->state) { case S_PRE_CMP_DISC: hostdata->connected = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->state = S_UNCONNECTED; DB(DB_INTR, printk(":%d", cmd->SCp.Status)) if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD) cmd->result = (cmd-> result & 0x00ffff) | (DID_ERROR << 16); else cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); cmd->scsi_done(cmd); break; case S_PRE_TMP_DISC: case S_RUNNING_LEVEL2: cmd->host_scribble = (uchar *) hostdata->disconnected_Q; hostdata->disconnected_Q = cmd; hostdata->connected = NULL; hostdata->state = S_UNCONNECTED; #ifdef PROC_STATISTICS hostdata->disc_done_cnt[cmd->device->id]++; #endif break; default: printk("*** Unexpected DISCONNECT interrupt! ***"); hostdata->state = S_UNCONNECTED; } /* We are no longer connected to a target - check to see if * there are commands waiting to be executed. */ spin_unlock_irqrestore(&hostdata->lock, flags); wd33c93_execute(instance); break; case CSR_RESEL_AM: case CSR_RESEL: DB(DB_INTR, printk("RESEL%s", sr == CSR_RESEL_AM ? "_AM" : "")) /* Old chips (pre -A ???) don't have advanced features and will * generate CSR_RESEL. In that case we have to extract the LUN the * hard way (see below). * First we have to make sure this reselection didn't * happen during Arbitration/Selection of some other device. * If yes, put losing command back on top of input_Q. */ if (hostdata->level2 <= L2_NONE) { if (hostdata->selecting) { cmd = (struct scsi_cmnd *) hostdata->selecting; hostdata->selecting = NULL; hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } } else { if (cmd) { if (phs == 0x00) { hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); cmd->host_scribble = (uchar *) hostdata->input_Q; hostdata->input_Q = cmd; } else { printk ("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs); while (1) printk("\r"); } } } /* OK - find out which device reselected us. */ id = read_wd33c93(regs, WD_SOURCE_ID); id &= SRCID_MASK; /* and extract the lun from the ID message. 
(Note that we don't * bother to check for a valid message here - I guess this is * not the right way to go, but...) */ if (sr == CSR_RESEL_AM) { lun = read_wd33c93(regs, WD_DATA); if (hostdata->level2 < L2_RESELECT) write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); lun &= 7; } else { /* Old chip; wait for msgin phase to pick up the LUN. */ for (lun = 255; lun; lun--) { if ((asr = read_aux_stat(regs)) & ASR_INT) break; udelay(10); } if (!(asr & ASR_INT)) { printk ("wd33c93: Reselected without IDENTIFY\n"); lun = 0; } else { /* Verify this is a change to MSG_IN and read the message */ sr = read_wd33c93(regs, WD_SCSI_STATUS); udelay(7); if (sr == (CSR_ABORT | PHS_MESS_IN) || sr == (CSR_UNEXP | PHS_MESS_IN) || sr == (CSR_SRV_REQ | PHS_MESS_IN)) { /* Got MSG_IN, grab target LUN */ lun = read_1_byte(regs); /* Now we expect a 'paused with ACK asserted' int.. */ asr = read_aux_stat(regs); if (!(asr & ASR_INT)) { udelay(10); asr = read_aux_stat(regs); if (!(asr & ASR_INT)) printk ("wd33c93: No int after LUN on RESEL (%02x)\n", asr); } sr = read_wd33c93(regs, WD_SCSI_STATUS); udelay(7); if (sr != CSR_MSGIN) printk ("wd33c93: Not paused with ACK on RESEL (%02x)\n", sr); lun &= 7; write_wd33c93_cmd(regs, WD_CMD_NEGATE_ACK); } else { printk ("wd33c93: Not MSG_IN on reselect (%02x)\n", sr); lun = 0; } } } /* Now we look for the command that's reconnecting. */ cmd = (struct scsi_cmnd *) hostdata->disconnected_Q; patch = NULL; while (cmd) { if (id == cmd->device->id && lun == cmd->device->lun) break; patch = cmd; cmd = (struct scsi_cmnd *) cmd->host_scribble; } /* Hmm. Couldn't find a valid command.... What to do? */ if (!cmd) { printk ("---TROUBLE: target %d.%d not in disconnect queue---", id, lun); spin_unlock_irqrestore(&hostdata->lock, flags); return; } /* Ok, found the command - now start it up again. */ if (patch) patch->host_scribble = cmd->host_scribble; else hostdata->disconnected_Q = (struct scsi_cmnd *) cmd->host_scribble; hostdata->connected = cmd; /* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]' * because these things are preserved over a disconnect. * But we DO need to fix the DPD bit so it's correct for this command. */ if (cmd->sc_data_direction == DMA_TO_DEVICE) write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id); else write_wd33c93(regs, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD); if (hostdata->level2 >= L2_RESELECT) { write_wd33c93_count(regs, 0); /* we want a DATA_PHASE interrupt */ write_wd33c93(regs, WD_COMMAND_PHASE, 0x45); write_wd33c93_cmd(regs, WD_CMD_SEL_ATN_XFER); hostdata->state = S_RUNNING_LEVEL2; } else hostdata->state = S_CONNECTED; spin_unlock_irqrestore(&hostdata->lock, flags); break; default: printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs); spin_unlock_irqrestore(&hostdata->lock, flags); } DB(DB_INTR, printk("} ")) } static void reset_wd33c93(struct Scsi_Host *instance) { struct WD33C93_hostdata *hostdata = (struct WD33C93_hostdata *) instance->hostdata; const wd33c93_regs regs = hostdata->regs; uchar sr; #ifdef CONFIG_SGI_IP22 { int busycount = 0; extern void sgiwd93_reset(unsigned long); /* wait 'til the chip gets some time for us */ while ((read_aux_stat(regs) & ASR_BSY) && busycount++ < 100) udelay (10); /* * there are scsi devices out there, which manage to lock up * the wd33c93 in a busy condition. In this state it won't * accept the reset command. The only way to solve this is to * give the chip a hardware reset (if possible). The code below * does this for the SGI Indy, where this is possible */ /* still busy ? 
*/ if (read_aux_stat(regs) & ASR_BSY) sgiwd93_reset(instance->base); /* yeah, give it the hard one */ } #endif write_wd33c93(regs, WD_OWN_ID, OWNID_EAF | OWNID_RAF | instance->this_id | hostdata->clock_freq); write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF, 0, hostdata->sx_table)); write_wd33c93(regs, WD_COMMAND, WD_CMD_RESET); #ifdef CONFIG_MVME147_SCSI udelay(25); /* The old wd33c93 on MVME147 needs this, at least */ #endif while (!(read_aux_stat(regs) & ASR_INT)) ; sr = read_wd33c93(regs, WD_SCSI_STATUS); hostdata->microcode = read_wd33c93(regs, WD_CDB_1); if (sr == 0x00) hostdata->chip = C_WD33C93; else if (sr == 0x01) { write_wd33c93(regs, WD_QUEUE_TAG, 0xa5); /* any random number */ sr = read_wd33c93(regs, WD_QUEUE_TAG); if (sr == 0xa5) { hostdata->chip = C_WD33C93B; write_wd33c93(regs, WD_QUEUE_TAG, 0); } else hostdata->chip = C_WD33C93A; } else hostdata->chip = C_UNKNOWN_CHIP; if (hostdata->chip != C_WD33C93B) /* Fast SCSI unavailable */ hostdata->fast = 0; write_wd33c93(regs, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE); write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); } int wd33c93_host_reset(struct scsi_cmnd * SCpnt) { struct Scsi_Host *instance; struct WD33C93_hostdata *hostdata; int i; instance = SCpnt->device->host; hostdata = (struct WD33C93_hostdata *) instance->hostdata; printk("scsi%d: reset. ", instance->host_no); disable_irq(instance->irq); hostdata->dma_stop(instance, NULL, 0); for (i = 0; i < 8; i++) { hostdata->busy[i] = 0; hostdata->sync_xfer[i] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF, 0, hostdata->sx_table); hostdata->sync_stat[i] = SS_UNSET; /* using default sync values */ } hostdata->input_Q = NULL; hostdata->selecting = NULL; hostdata->connected = NULL; hostdata->disconnected_Q = NULL; hostdata->state = S_UNCONNECTED; hostdata->dma = D_DMA_OFF; hostdata->incoming_ptr = 0; hostdata->outgoing_len = 0; reset_wd33c93(instance); SCpnt->result = DID_RESET << 16; enable_irq(instance->irq); return SUCCESS; } int wd33c93_abort(struct scsi_cmnd * cmd) { struct Scsi_Host *instance; struct WD33C93_hostdata *hostdata; wd33c93_regs regs; struct scsi_cmnd *tmp, *prev; disable_irq(cmd->device->host->irq); instance = cmd->device->host; hostdata = (struct WD33C93_hostdata *) instance->hostdata; regs = hostdata->regs; /* * Case 1 : If the command hasn't been issued yet, we simply remove it * from the input_Q. */ tmp = (struct scsi_cmnd *) hostdata->input_Q; prev = NULL; while (tmp) { if (tmp == cmd) { if (prev) prev->host_scribble = cmd->host_scribble; else hostdata->input_Q = (struct scsi_cmnd *) cmd->host_scribble; cmd->host_scribble = NULL; cmd->result = DID_ABORT << 16; printk ("scsi%d: Abort - removing command from input_Q. ", instance->host_no); enable_irq(cmd->device->host->irq); cmd->scsi_done(cmd); return SUCCESS; } prev = tmp; tmp = (struct scsi_cmnd *) tmp->host_scribble; } /* * Case 2 : If the command is connected, we're going to fail the abort * and let the high level SCSI driver retry at a later time or * issue a reset. * * Timeouts, and therefore aborted commands, will be highly unlikely * and handling them cleanly in this situation would make the common * case of noresets less efficient, and would pollute our code. So, * we fail. 
*/ if (hostdata->connected == cmd) { uchar sr, asr; unsigned long timeout; printk("scsi%d: Aborting connected command - ", instance->host_no); printk("stopping DMA - "); if (hostdata->dma == D_DMA_RUNNING) { hostdata->dma_stop(instance, cmd, 0); hostdata->dma = D_DMA_OFF; } printk("sending wd33c93 ABORT command - "); write_wd33c93(regs, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED); write_wd33c93_cmd(regs, WD_CMD_ABORT); /* Now we have to attempt to flush out the FIFO... */ printk("flushing fifo - "); timeout = 1000000; do { asr = read_aux_stat(regs); if (asr & ASR_DBR) read_wd33c93(regs, WD_DATA); } while (!(asr & ASR_INT) && timeout-- > 0); sr = read_wd33c93(regs, WD_SCSI_STATUS); printk ("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_wd33c93_count(regs), timeout); /* * Abort command processed. * Still connected. * We must disconnect. */ printk("sending wd33c93 DISCONNECT command - "); write_wd33c93_cmd(regs, WD_CMD_DISCONNECT); timeout = 1000000; asr = read_aux_stat(regs); while ((asr & ASR_CIP) && timeout-- > 0) asr = read_aux_stat(regs); sr = read_wd33c93(regs, WD_SCSI_STATUS); printk("asr=%02x, sr=%02x.", asr, sr); hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); hostdata->connected = NULL; hostdata->state = S_UNCONNECTED; cmd->result = DID_ABORT << 16; /* sti();*/ wd33c93_execute(instance); enable_irq(cmd->device->host->irq); cmd->scsi_done(cmd); return SUCCESS; } /* * Case 3: If the command is currently disconnected from the bus, * we're not going to expend much effort here: Let's just return * an ABORT_SNOOZE and hope for the best... */ tmp = (struct scsi_cmnd *) hostdata->disconnected_Q; while (tmp) { if (tmp == cmd) { printk ("scsi%d: Abort - command found on disconnected_Q - ", instance->host_no); printk("Abort SNOOZE. "); enable_irq(cmd->device->host->irq); return FAILED; } tmp = (struct scsi_cmnd *) tmp->host_scribble; } /* * Case 4 : If we reached this point, the command was not found in any of * the queues. * * We probably reached this point because of an unlikely race condition * between the command completing successfully and the abortion code, * so we won't panic, but we will notify the user in case something really * broke. */ /* sti();*/ wd33c93_execute(instance); enable_irq(cmd->device->host->irq); printk("scsi%d: warning : SCSI command probably completed successfully" " before abortion. ", instance->host_no); return FAILED; } #define MAX_WD33C93_HOSTS 4 #define MAX_SETUP_ARGS ARRAY_SIZE(setup_args) #define SETUP_BUFFER_SIZE 200 static char setup_buffer[SETUP_BUFFER_SIZE]; static char setup_used[MAX_SETUP_ARGS]; static int done_setup = 0; static int wd33c93_setup(char *str) { int i; char *p1, *p2; /* The kernel does some processing of the command-line before calling * this function: If it begins with any decimal or hex number arguments, * ints[0] = how many numbers found and ints[1] through [n] are the values * themselves. str points to where the non-numeric arguments (if any) * start: We do our own parsing of those. We construct synthetic 'nosync' * keywords out of numeric args (to maintain compatibility with older * versions) and then add the rest of the arguments. 
*/ p1 = setup_buffer; *p1 = '\0'; if (str) strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer)); setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0'; p1 = setup_buffer; i = 0; while (*p1 && (i < MAX_SETUP_ARGS)) { p2 = strchr(p1, ','); if (p2) { *p2 = '\0'; if (p1 != p2) setup_args[i] = p1; p1 = p2 + 1; i++; } else { setup_args[i] = p1; break; } } for (i = 0; i < MAX_SETUP_ARGS; i++) setup_used[i] = 0; done_setup = 1; return 1; } __setup("wd33c93=", wd33c93_setup); /* check_setup_args() returns index if key found, 0 if not */ static int check_setup_args(char *key, int *flags, int *val, char *buf) { int x; char *cp; for (x = 0; x < MAX_SETUP_ARGS; x++) { if (setup_used[x]) continue; if (!strncmp(setup_args[x], key, strlen(key))) break; if (!strncmp(setup_args[x], "next", strlen("next"))) return 0; } if (x == MAX_SETUP_ARGS) return 0; setup_used[x] = 1; cp = setup_args[x] + strlen(key); *val = -1; if (*cp != ':') return ++x; cp++; if ((*cp >= '0') && (*cp <= '9')) { *val = simple_strtoul(cp, NULL, 0); } return ++x; } /* * Calculate internal data-transfer-clock cycle from input-clock * frequency (/MHz) and fill 'sx_table'. * * The original driver used to rely on a fixed sx_table, containing periods * for (only) the lower limits of the respective input-clock-frequency ranges * (8-10/12-15/16-20 MHz). Although it seems, that no problems occurred with * this setting so far, it might be desirable to adjust the transfer periods * closer to the really attached, possibly 25% higher, input-clock, since * - the wd33c93 may really use a significant shorter period, than it has * negotiated (eg. thrashing the target, which expects 4/8MHz, with 5/10MHz * instead). * - the wd33c93 may ask the target for a lower transfer rate, than the target * is capable of (eg. negotiating for an assumed minimum of 252ns instead of * possible 200ns, which indeed shows up in tests as an approx. 10% lower * transfer rate). */ static inline unsigned int round_4(unsigned int x) { switch (x & 3) { case 1: --x; break; case 2: ++x; case 3: ++x; } return x; } static void calc_sx_table(unsigned int mhz, struct sx_period sx_table[9]) { unsigned int d, i; if (mhz < 11) d = 2; /* divisor for 8-10 MHz input-clock */ else if (mhz < 16) d = 3; /* divisor for 12-15 MHz input-clock */ else d = 4; /* divisor for 16-20 MHz input-clock */ d = (100000 * d) / 2 / mhz; /* 100 x DTCC / nanosec */ sx_table[0].period_ns = 1; sx_table[0].reg_value = 0x20; for (i = 1; i < 8; i++) { sx_table[i].period_ns = round_4((i+1)*d / 100); sx_table[i].reg_value = (i+1)*0x10; } sx_table[7].reg_value = 0; sx_table[8].period_ns = 0; sx_table[8].reg_value = 0; } /* * check and, maybe, map an init- or "clock:"- argument. */ static uchar set_clk_freq(int freq, int *mhz) { int x = freq; if (WD33C93_FS_8_10 == freq) freq = 8; else if (WD33C93_FS_12_15 == freq) freq = 12; else if (WD33C93_FS_16_20 == freq) freq = 16; else if (freq > 7 && freq < 11) x = WD33C93_FS_8_10; else if (freq > 11 && freq < 16) x = WD33C93_FS_12_15; else if (freq > 15 && freq < 21) x = WD33C93_FS_16_20; else { /* Hmm, wouldn't it be safer to assume highest freq here? */ x = WD33C93_FS_8_10; freq = 8; } *mhz = freq; return x; } /* * to be used with the resync: fast: ... 
options */ static inline void set_resync ( struct WD33C93_hostdata *hd, int mask ) { int i; for (i = 0; i < 8; i++) if (mask & (1 << i)) hd->sync_stat[i] = SS_UNSET; } void wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs, dma_setup_t setup, dma_stop_t stop, int clock_freq) { struct WD33C93_hostdata *hostdata; int i; int flags; int val; char buf[32]; if (!done_setup && setup_strings) wd33c93_setup(setup_strings); hostdata = (struct WD33C93_hostdata *) instance->hostdata; hostdata->regs = regs; hostdata->clock_freq = set_clk_freq(clock_freq, &i); calc_sx_table(i, hostdata->sx_table); hostdata->dma_setup = setup; hostdata->dma_stop = stop; hostdata->dma_bounce_buffer = NULL; hostdata->dma_bounce_len = 0; for (i = 0; i < 8; i++) { hostdata->busy[i] = 0; hostdata->sync_xfer[i] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF, 0, hostdata->sx_table); hostdata->sync_stat[i] = SS_UNSET; /* using default sync values */ #ifdef PROC_STATISTICS hostdata->cmd_cnt[i] = 0; hostdata->disc_allowed_cnt[i] = 0; hostdata->disc_done_cnt[i] = 0; #endif } hostdata->input_Q = NULL; hostdata->selecting = NULL; hostdata->connected = NULL; hostdata->disconnected_Q = NULL; hostdata->state = S_UNCONNECTED; hostdata->dma = D_DMA_OFF; hostdata->level2 = L2_BASIC; hostdata->disconnect = DIS_ADAPTIVE; hostdata->args = DEBUG_DEFAULTS; hostdata->incoming_ptr = 0; hostdata->outgoing_len = 0; hostdata->default_sx_per = DEFAULT_SX_PER; hostdata->no_dma = 0; /* default is DMA enabled */ #ifdef PROC_INTERFACE hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP; #ifdef PROC_STATISTICS hostdata->dma_cnt = 0; hostdata->pio_cnt = 0; hostdata->int_cnt = 0; #endif #endif if (check_setup_args("clock", &flags, &val, buf)) { hostdata->clock_freq = set_clk_freq(val, &val); calc_sx_table(val, hostdata->sx_table); } if (check_setup_args("nosync", &flags, &val, buf)) hostdata->no_sync = val; if (check_setup_args("nodma", &flags, &val, buf)) hostdata->no_dma = (val == -1) ? 1 : val; if (check_setup_args("period", &flags, &val, buf)) hostdata->default_sx_per = hostdata->sx_table[round_period((unsigned int) val, hostdata->sx_table)].period_ns; if (check_setup_args("disconnect", &flags, &val, buf)) { if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS)) hostdata->disconnect = val; else hostdata->disconnect = DIS_ADAPTIVE; } if (check_setup_args("level2", &flags, &val, buf)) hostdata->level2 = val; if (check_setup_args("debug", &flags, &val, buf)) hostdata->args = val & DB_MASK; if (check_setup_args("burst", &flags, &val, buf)) hostdata->dma_mode = val ? CTRL_BURST:CTRL_DMA; if (WD33C93_FS_16_20 == hostdata->clock_freq /* divisor 4 */ && check_setup_args("fast", &flags, &val, buf)) hostdata->fast = !!val; if ((i = check_setup_args("next", &flags, &val, buf))) { while (i) setup_used[--i] = 1; } #ifdef PROC_INTERFACE if (check_setup_args("proc", &flags, &val, buf)) hostdata->proc = val; #endif spin_lock_irq(&hostdata->lock); reset_wd33c93(instance); spin_unlock_irq(&hostdata->lock); printk("wd33c93-%d: chip=%s/%d no_sync=0x%x no_dma=%d", instance->host_no, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? 
"WD33c93B" : "unknown", hostdata->microcode, hostdata->no_sync, hostdata->no_dma); #ifdef DEBUGGING_ON printk(" debug_flags=0x%02x\n", hostdata->args); #else printk(" debugging=OFF\n"); #endif printk(" setup_args="); for (i = 0; i < MAX_SETUP_ARGS; i++) printk("%s,", setup_args[i]); printk("\n"); printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE); } int wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off, int len, int in) { #ifdef PROC_INTERFACE char *bp; char tbuf[128]; struct WD33C93_hostdata *hd; struct scsi_cmnd *cmd; int x; static int stop = 0; hd = (struct WD33C93_hostdata *) instance->hostdata; /* If 'in' is TRUE we need to _read_ the proc file. We accept the following * keywords (same format as command-line, but arguments are not optional): * debug * disconnect * period * resync * proc * nodma * level2 * burst * fast * nosync */ if (in) { buf[len] = '\0'; for (bp = buf; *bp; ) { while (',' == *bp || ' ' == *bp) ++bp; if (!strncmp(bp, "debug:", 6)) { hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK; } else if (!strncmp(bp, "disconnect:", 11)) { x = simple_strtoul(bp+11, &bp, 0); if (x < DIS_NEVER || x > DIS_ALWAYS) x = DIS_ADAPTIVE; hd->disconnect = x; } else if (!strncmp(bp, "period:", 7)) { x = simple_strtoul(bp+7, &bp, 0); hd->default_sx_per = hd->sx_table[round_period((unsigned int) x, hd->sx_table)].period_ns; } else if (!strncmp(bp, "resync:", 7)) { set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0)); } else if (!strncmp(bp, "proc:", 5)) { hd->proc = simple_strtoul(bp+5, &bp, 0); } else if (!strncmp(bp, "nodma:", 6)) { hd->no_dma = simple_strtoul(bp+6, &bp, 0); } else if (!strncmp(bp, "level2:", 7)) { hd->level2 = simple_strtoul(bp+7, &bp, 0); } else if (!strncmp(bp, "burst:", 6)) { hd->dma_mode = simple_strtol(bp+6, &bp, 0) ? CTRL_BURST:CTRL_DMA; } else if (!strncmp(bp, "fast:", 5)) { x = !!simple_strtol(bp+5, &bp, 0); if (x != hd->fast) set_resync(hd, 0xff); hd->fast = x; } else if (!strncmp(bp, "nosync:", 7)) { x = simple_strtoul(bp+7, &bp, 0); set_resync(hd, x ^ hd->no_sync); hd->no_sync = x; } else { break; /* unknown keyword,syntax-error,... 
*/ } } return len; } spin_lock_irq(&hd->lock); bp = buf; *bp = '\0'; if (hd->proc & PR_VERSION) { sprintf(tbuf, "\nVersion %s - %s.", WD33C93_VERSION, WD33C93_DATE); strcat(bp, tbuf); } if (hd->proc & PR_INFO) { sprintf(tbuf, "\nclock_freq=%02x no_sync=%02x no_dma=%d" " dma_mode=%02x fast=%d", hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast); strcat(bp, tbuf); strcat(bp, "\nsync_xfer[] = "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%02x", hd->sync_xfer[x]); strcat(bp, tbuf); } strcat(bp, "\nsync_stat[] = "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%02x", hd->sync_stat[x]); strcat(bp, tbuf); } } #ifdef PROC_STATISTICS if (hd->proc & PR_STATISTICS) { strcat(bp, "\ncommands issued: "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%ld", hd->cmd_cnt[x]); strcat(bp, tbuf); } strcat(bp, "\ndisconnects allowed:"); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%ld", hd->disc_allowed_cnt[x]); strcat(bp, tbuf); } strcat(bp, "\ndisconnects done: "); for (x = 0; x < 7; x++) { sprintf(tbuf, "\t%ld", hd->disc_done_cnt[x]); strcat(bp, tbuf); } sprintf(tbuf, "\ninterrupts: %ld, DATA_PHASE ints: %ld DMA, %ld PIO", hd->int_cnt, hd->dma_cnt, hd->pio_cnt); strcat(bp, tbuf); } #endif if (hd->proc & PR_CONNECTED) { strcat(bp, "\nconnected: "); if (hd->connected) { cmd = (struct scsi_cmnd *) hd->connected; sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); } } if (hd->proc & PR_INPUTQ) { strcat(bp, "\ninput_Q: "); cmd = (struct scsi_cmnd *) hd->input_Q; while (cmd) { sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (struct scsi_cmnd *) cmd->host_scribble; } } if (hd->proc & PR_DISCQ) { strcat(bp, "\ndisconnected_Q:"); cmd = (struct scsi_cmnd *) hd->disconnected_Q; while (cmd) { sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]); strcat(bp, tbuf); cmd = (struct scsi_cmnd *) cmd->host_scribble; } } strcat(bp, "\n"); spin_unlock_irq(&hd->lock); *start = buf; if (stop) { stop = 0; return 0; } if (off > 0x40000) /* ALWAYS stop after 256k bytes have been read */ stop = 1; if (hd->proc & PR_STOP) /* stop every other time */ stop = 1; return strlen(bp); #else /* PROC_INTERFACE */ return 0; #endif /* PROC_INTERFACE */ } EXPORT_SYMBOL(wd33c93_host_reset); EXPORT_SYMBOL(wd33c93_init); EXPORT_SYMBOL(wd33c93_abort); EXPORT_SYMBOL(wd33c93_queuecommand); EXPORT_SYMBOL(wd33c93_intr); EXPORT_SYMBOL(wd33c93_proc_info);
gpl-2.0
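A minimal, self-contained sketch of the disconnected-queue walk performed in the CSR_RESEL path above: hostdata->disconnected_Q is a singly linked list threaded through cmd->host_scribble, and the handler searches it for the command matching the reselecting target's id and lun, unlinking it with a trailing "patch" pointer. struct demo_cmd, its next field and demo_pop_disconnected() are hypothetical names introduced only for this illustration; next plays the role of host_scribble.

#include <stddef.h>

/* Hypothetical stand-in for struct scsi_cmnd: only the fields the
 * queue walk needs (target id, lun, and the link that plays the role
 * of host_scribble). */
struct demo_cmd {
	int id;
	int lun;
	struct demo_cmd *next;
};

/* Find the command belonging to the reselecting target (id:lun) and
 * unlink it from the singly linked disconnected queue, mirroring the
 * patch/cmd loop in the CSR_RESEL handler. Returns NULL when the
 * target is not queued (the "not in disconnect queue" trouble case). */
static struct demo_cmd *demo_pop_disconnected(struct demo_cmd **queue,
					       int id, int lun)
{
	struct demo_cmd *cmd = *queue, *patch = NULL;

	while (cmd) {
		if (cmd->id == id && cmd->lun == lun)
			break;
		patch = cmd;
		cmd = cmd->next;
	}
	if (!cmd)
		return NULL;

	if (patch)
		patch->next = cmd->next;	/* unlink from the middle */
	else
		*queue = cmd->next;		/* it was the list head */
	cmd->next = NULL;
	return cmd;
}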
GeyerA/kernel_hammerhead
drivers/usb/host/whci/init.c
9719
5014
/* * Wireless Host Controller (WHC) initialization. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/dma-mapping.h> #include <linux/uwb/umc.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" /* * Reset the host controller. */ static void whc_hw_reset(struct whc *whc) { le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD); whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0, 100, "reset"); } static void whc_hw_init_di_buf(struct whc *whc) { int d; /* Disable all entries in the Device Information buffer. */ for (d = 0; d < whc->n_devices; d++) whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE; le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR); } static void whc_hw_init_dn_buf(struct whc *whc) { /* Clear the Device Notification buffer to ensure the V (valid) * bits are clear. */ memset(whc->dn_buf, 0, 4096); le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR); } int whc_init(struct whc *whc) { u32 whcsparams; int ret, i; resource_size_t start, len; spin_lock_init(&whc->lock); mutex_init(&whc->mutex); init_waitqueue_head(&whc->cmd_wq); init_waitqueue_head(&whc->async_list_wq); init_waitqueue_head(&whc->periodic_list_wq); whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev)); if (whc->workqueue == NULL) { ret = -ENOMEM; goto error; } INIT_WORK(&whc->dn_work, whc_dn_work); INIT_WORK(&whc->async_work, scan_async_work); INIT_LIST_HEAD(&whc->async_list); INIT_LIST_HEAD(&whc->async_removed_list); INIT_WORK(&whc->periodic_work, scan_periodic_work); for (i = 0; i < 5; i++) INIT_LIST_HEAD(&whc->periodic_list[i]); INIT_LIST_HEAD(&whc->periodic_removed_list); /* Map HC registers. */ start = whc->umc->resource.start; len = whc->umc->resource.end - start + 1; if (!request_mem_region(start, len, "whci-hc")) { dev_err(&whc->umc->dev, "can't request HC region\n"); ret = -EBUSY; goto error; } whc->base_phys = start; whc->base = ioremap(start, len); if (!whc->base) { dev_err(&whc->umc->dev, "ioremap\n"); ret = -ENOMEM; goto error; } whc_hw_reset(whc); /* Read maximum number of devices, keys and MMC IEs. */ whcsparams = le_readl(whc->base + WHCSPARAMS); whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams); whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams); whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams); dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n", whc->n_devices, whc->n_keys, whc->n_mmc_ies); whc->qset_pool = dma_pool_create("qset", &whc->umc->dev, sizeof(struct whc_qset), 64, 0); if (whc->qset_pool == NULL) { ret = -ENOMEM; goto error; } ret = asl_init(whc); if (ret < 0) goto error; ret = pzl_init(whc); if (ret < 0) goto error; /* Allocate and initialize a buffer for generic commands, the Device Information buffer, and the Device Notification buffer. 
*/ whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, &whc->gen_cmd_buf_dma, GFP_KERNEL); if (whc->gen_cmd_buf == NULL) { ret = -ENOMEM; goto error; } whc->dn_buf = dma_alloc_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, &whc->dn_buf_dma, GFP_KERNEL); if (!whc->dn_buf) { ret = -ENOMEM; goto error; } whc_hw_init_dn_buf(whc); whc->di_buf = dma_alloc_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices, &whc->di_buf_dma, GFP_KERNEL); if (!whc->di_buf) { ret = -ENOMEM; goto error; } whc_hw_init_di_buf(whc); return 0; error: whc_clean_up(whc); return ret; } void whc_clean_up(struct whc *whc) { resource_size_t len; if (whc->di_buf) dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices, whc->di_buf, whc->di_buf_dma); if (whc->dn_buf) dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES, whc->dn_buf, whc->dn_buf_dma); if (whc->gen_cmd_buf) dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN, whc->gen_cmd_buf, whc->gen_cmd_buf_dma); pzl_clean_up(whc); asl_clean_up(whc); if (whc->qset_pool) dma_pool_destroy(whc->qset_pool); len = resource_size(&whc->umc->resource); if (whc->base) iounmap(whc->base); if (whc->base_phys) release_mem_region(whc->base_phys, len); if (whc->workqueue) destroy_workqueue(whc->workqueue); }
gpl-2.0
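whc_init() above follows the common kernel init pattern of allocating each resource in turn, jumping to a single error label on any failure, and delegating the unwind to whc_clean_up(), which tests every pointer before releasing it so it stays safe to call on a partially initialized structure. The sketch below shows the same pattern in stand-alone form; struct demo_ctx, demo_init() and demo_clean_up() are hypothetical, and plain malloc()/free() stand in for dma_alloc_coherent()/dma_free_coherent().

#include <stdlib.h>

/* Hypothetical context with two buffers, standing in for the generic
 * command and device notification buffers whc_init() allocates. */
struct demo_ctx {
	void *cmd_buf;
	void *dn_buf;
};

static void demo_clean_up(struct demo_ctx *ctx)
{
	/* Release only what was actually set up; free(NULL) is a no-op,
	 * just as whc_clean_up() tests each pointer before freeing. */
	free(ctx->dn_buf);
	free(ctx->cmd_buf);
	ctx->dn_buf = NULL;
	ctx->cmd_buf = NULL;
}

static int demo_init(struct demo_ctx *ctx)
{
	int ret;

	ctx->cmd_buf = NULL;
	ctx->dn_buf = NULL;

	ctx->cmd_buf = malloc(64);	/* generic command buffer */
	if (!ctx->cmd_buf) {
		ret = -1;
		goto error;
	}
	ctx->dn_buf = malloc(4096);	/* device notification buffer */
	if (!ctx->dn_buf) {
		ret = -1;
		goto error;
	}
	return 0;

error:
	/* Single exit path, single cleanup routine, as in whc_init(). */
	demo_clean_up(ctx);
	return ret;
}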
StelixROM/kernel_lge_msm8974
drivers/char/hw_random/mxc-rnga.c
9719
5804
/* * RNG driver for Freescale RNGA * * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved. * Author: Alan Carvalho de Assis <acassis@gmail.com> */ /* * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html * * This driver is based on other RNG drivers. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/io.h> /* RNGA Registers */ #define RNGA_CONTROL 0x00 #define RNGA_STATUS 0x04 #define RNGA_ENTROPY 0x08 #define RNGA_OUTPUT_FIFO 0x0c #define RNGA_MODE 0x10 #define RNGA_VERIFICATION_CONTROL 0x14 #define RNGA_OSC_CONTROL_COUNTER 0x18 #define RNGA_OSC1_COUNTER 0x1c #define RNGA_OSC2_COUNTER 0x20 #define RNGA_OSC_COUNTER_STATUS 0x24 /* RNGA Registers Range */ #define RNG_ADDR_RANGE 0x28 /* RNGA Control Register */ #define RNGA_CONTROL_SLEEP 0x00000010 #define RNGA_CONTROL_CLEAR_INT 0x00000008 #define RNGA_CONTROL_MASK_INTS 0x00000004 #define RNGA_CONTROL_HIGH_ASSURANCE 0x00000002 #define RNGA_CONTROL_GO 0x00000001 #define RNGA_STATUS_LEVEL_MASK 0x0000ff00 /* RNGA Status Register */ #define RNGA_STATUS_OSC_DEAD 0x80000000 #define RNGA_STATUS_SLEEP 0x00000010 #define RNGA_STATUS_ERROR_INT 0x00000008 #define RNGA_STATUS_FIFO_UNDERFLOW 0x00000004 #define RNGA_STATUS_LAST_READ_STATUS 0x00000002 #define RNGA_STATUS_SECURITY_VIOLATION 0x00000001 static struct platform_device *rng_dev; static int mxc_rnga_data_present(struct hwrng *rng) { int level; void __iomem *rng_base = (void __iomem *)rng->priv; /* how many random numbers is in FIFO? [0-16] */ level = ((__raw_readl(rng_base + RNGA_STATUS) & RNGA_STATUS_LEVEL_MASK) >> 8); return level > 0 ? 1 : 0; } static int mxc_rnga_data_read(struct hwrng *rng, u32 * data) { int err; u32 ctrl; void __iomem *rng_base = (void __iomem *)rng->priv; /* retrieve a random number from FIFO */ *data = __raw_readl(rng_base + RNGA_OUTPUT_FIFO); /* some error while reading this random number? 
*/ err = __raw_readl(rng_base + RNGA_STATUS) & RNGA_STATUS_ERROR_INT; /* if error: clear error interrupt, but doesn't return random number */ if (err) { dev_dbg(&rng_dev->dev, "Error while reading random number!\n"); ctrl = __raw_readl(rng_base + RNGA_CONTROL); __raw_writel(ctrl | RNGA_CONTROL_CLEAR_INT, rng_base + RNGA_CONTROL); return 0; } else return 4; } static int mxc_rnga_init(struct hwrng *rng) { u32 ctrl, osc; void __iomem *rng_base = (void __iomem *)rng->priv; /* wake up */ ctrl = __raw_readl(rng_base + RNGA_CONTROL); __raw_writel(ctrl & ~RNGA_CONTROL_SLEEP, rng_base + RNGA_CONTROL); /* verify if oscillator is working */ osc = __raw_readl(rng_base + RNGA_STATUS); if (osc & RNGA_STATUS_OSC_DEAD) { dev_err(&rng_dev->dev, "RNGA Oscillator is dead!\n"); return -ENODEV; } /* go running */ ctrl = __raw_readl(rng_base + RNGA_CONTROL); __raw_writel(ctrl | RNGA_CONTROL_GO, rng_base + RNGA_CONTROL); return 0; } static void mxc_rnga_cleanup(struct hwrng *rng) { u32 ctrl; void __iomem *rng_base = (void __iomem *)rng->priv; ctrl = __raw_readl(rng_base + RNGA_CONTROL); /* stop rnga */ __raw_writel(ctrl & ~RNGA_CONTROL_GO, rng_base + RNGA_CONTROL); } static struct hwrng mxc_rnga = { .name = "mxc-rnga", .init = mxc_rnga_init, .cleanup = mxc_rnga_cleanup, .data_present = mxc_rnga_data_present, .data_read = mxc_rnga_data_read }; static int __init mxc_rnga_probe(struct platform_device *pdev) { int err = -ENODEV; struct clk *clk; struct resource *res, *mem; void __iomem *rng_base = NULL; if (rng_dev) return -EBUSY; clk = clk_get(&pdev->dev, "rng"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "Could not get rng_clk!\n"); err = PTR_ERR(clk); goto out; } clk_enable(clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { err = -ENOENT; goto err_region; } mem = request_mem_region(res->start, resource_size(res), pdev->name); if (mem == NULL) { err = -EBUSY; goto err_region; } rng_base = ioremap(res->start, resource_size(res)); if (!rng_base) { err = -ENOMEM; goto err_ioremap; } mxc_rnga.priv = (unsigned long)rng_base; err = hwrng_register(&mxc_rnga); if (err) { dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err); goto err_register; } rng_dev = pdev; dev_info(&pdev->dev, "MXC RNGA Registered.\n"); return 0; err_register: iounmap(rng_base); rng_base = NULL; err_ioremap: release_mem_region(res->start, resource_size(res)); err_region: clk_disable(clk); clk_put(clk); out: return err; } static int __exit mxc_rnga_remove(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); void __iomem *rng_base = (void __iomem *)mxc_rnga.priv; struct clk *clk = clk_get(&pdev->dev, "rng"); hwrng_unregister(&mxc_rnga); iounmap(rng_base); release_mem_region(res->start, resource_size(res)); clk_disable(clk); clk_put(clk); return 0; } static struct platform_driver mxc_rnga_driver = { .driver = { .name = "mxc_rnga", .owner = THIS_MODULE, }, .remove = __exit_p(mxc_rnga_remove), }; static int __init mod_init(void) { return platform_driver_probe(&mxc_rnga_driver, mxc_rnga_probe); } static void __exit mod_exit(void) { platform_driver_unregister(&mxc_rnga_driver); } module_init(mod_init); module_exit(mod_exit); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("H/W RNGA driver for i.MX"); MODULE_LICENSE("GPL");
gpl-2.0
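The two hwrng callbacks above are small enough to restate as a stand-alone sketch: mxc_rnga_data_present() reads the FIFO fill level from bits 15..8 of the status register, and mxc_rnga_data_read() pops one 32-bit word from the output FIFO, acknowledging a pending error interrupt with a read-modify-write of the control register and reporting 0 bytes in that case, 4 otherwise. The demo_mmio array and the demo_* helpers below are hypothetical stand-ins for the ioremapped register window and __raw_readl()/__raw_writel().

#include <stdint.h>

/* Register offsets and bit masks copied from the driver above; the
 * demo_mmio array is a hypothetical stand-in for the register window. */
#define DEMO_RNGA_CONTROL           0x00
#define DEMO_RNGA_STATUS            0x04
#define DEMO_RNGA_OUTPUT_FIFO       0x0c
#define DEMO_RNGA_CONTROL_CLEAR_INT 0x00000008
#define DEMO_RNGA_STATUS_LEVEL_MASK 0x0000ff00
#define DEMO_RNGA_STATUS_ERROR_INT  0x00000008

static volatile uint32_t demo_mmio[16];

static uint32_t demo_readl(unsigned int off)
{
	return demo_mmio[off / 4];
}

static void demo_writel(uint32_t val, unsigned int off)
{
	demo_mmio[off / 4] = val;
}

/* Mirrors mxc_rnga_data_present(): bits 15..8 of STATUS hold the FIFO
 * fill level (0-16 words); anything non-zero means data is ready. */
static int demo_data_present(void)
{
	int level = (demo_readl(DEMO_RNGA_STATUS) &
		     DEMO_RNGA_STATUS_LEVEL_MASK) >> 8;

	return level > 0;
}

/* Mirrors mxc_rnga_data_read(): pop one word, then check ERROR_INT.
 * On error the interrupt is cleared with a read-modify-write of
 * CONTROL and 0 bytes are returned; otherwise 4 bytes are reported. */
static int demo_data_read(uint32_t *data)
{
	uint32_t ctrl;

	*data = demo_readl(DEMO_RNGA_OUTPUT_FIFO);
	if (demo_readl(DEMO_RNGA_STATUS) & DEMO_RNGA_STATUS_ERROR_INT) {
		ctrl = demo_readl(DEMO_RNGA_CONTROL);
		demo_writel(ctrl | DEMO_RNGA_CONTROL_CLEAR_INT,
			    DEMO_RNGA_CONTROL);
		return 0;
	}
	return 4;
}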
riskey95/kernel-lge-p715
drivers/media/video/sn9c102/sn9c102_hv7131d.c
12023
7085
/*************************************************************************** * Plug-in for HV7131D image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2004-2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int hv7131d_init(struct sn9c102_device* cam) { int err; err = sn9c102_write_const_regs(cam, {0x00, 0x10}, {0x00, 0x11}, {0x00, 0x14}, {0x60, 0x17}, {0x0e, 0x18}, {0xf2, 0x19}); err += sn9c102_i2c_write(cam, 0x01, 0x04); err += sn9c102_i2c_write(cam, 0x02, 0x00); err += sn9c102_i2c_write(cam, 0x28, 0x00); return err; } static int hv7131d_get_ctrl(struct sn9c102_device* cam, struct v4l2_control* ctrl) { switch (ctrl->id) { case V4L2_CID_EXPOSURE: { int r1 = sn9c102_i2c_read(cam, 0x26), r2 = sn9c102_i2c_read(cam, 0x27); if (r1 < 0 || r2 < 0) return -EIO; ctrl->value = (r1 << 8) | (r2 & 0xff); } return 0; case V4L2_CID_RED_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x31)) < 0) return -EIO; ctrl->value = 0x3f - (ctrl->value & 0x3f); return 0; case V4L2_CID_BLUE_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x33)) < 0) return -EIO; ctrl->value = 0x3f - (ctrl->value & 0x3f); return 0; case SN9C102_V4L2_CID_GREEN_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x32)) < 0) return -EIO; ctrl->value = 0x3f - (ctrl->value & 0x3f); return 0; case SN9C102_V4L2_CID_RESET_LEVEL: if ((ctrl->value = sn9c102_i2c_read(cam, 0x30)) < 0) return -EIO; ctrl->value &= 0x3f; return 0; case SN9C102_V4L2_CID_PIXEL_BIAS_VOLTAGE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x34)) < 0) return -EIO; ctrl->value &= 0x07; return 0; default: return -EINVAL; } } static int hv7131d_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { int err = 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: err += sn9c102_i2c_write(cam, 0x26, ctrl->value >> 8); err += sn9c102_i2c_write(cam, 0x27, ctrl->value & 0xff); break; case V4L2_CID_RED_BALANCE: err += sn9c102_i2c_write(cam, 0x31, 0x3f - ctrl->value); break; case V4L2_CID_BLUE_BALANCE: err += sn9c102_i2c_write(cam, 0x33, 0x3f - ctrl->value); break; case SN9C102_V4L2_CID_GREEN_BALANCE: err += sn9c102_i2c_write(cam, 0x32, 0x3f - ctrl->value); break; case SN9C102_V4L2_CID_RESET_LEVEL: err += sn9c102_i2c_write(cam, 0x30, ctrl->value); break; case SN9C102_V4L2_CID_PIXEL_BIAS_VOLTAGE: err += sn9c102_i2c_write(cam, 0x34, ctrl->value); break; default: return -EINVAL; } return err ? 
-EIO : 0; } static int hv7131d_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 2, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 2; err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); return err; } static int hv7131d_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { int err = 0; if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) err += sn9c102_write_reg(cam, 0x42, 0x19); else err += sn9c102_write_reg(cam, 0xf2, 0x19); return err; } static const struct sn9c102_sensor hv7131d = { .name = "HV7131D", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102, .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_2WIRES, .i2c_slave_id = 0x11, .init = &hv7131d_init, .qctrl = { { .id = V4L2_CID_EXPOSURE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .minimum = 0x0250, .maximum = 0xffff, .step = 0x0001, .default_value = 0x0250, .flags = 0, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0x00, .maximum = 0x3f, .step = 0x01, .default_value = 0x00, .flags = 0, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0x00, .maximum = 0x3f, .step = 0x01, .default_value = 0x20, .flags = 0, }, { .id = SN9C102_V4L2_CID_GREEN_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0x00, .maximum = 0x3f, .step = 0x01, .default_value = 0x1e, .flags = 0, }, { .id = SN9C102_V4L2_CID_RESET_LEVEL, .type = V4L2_CTRL_TYPE_INTEGER, .name = "reset level", .minimum = 0x19, .maximum = 0x3f, .step = 0x01, .default_value = 0x30, .flags = 0, }, { .id = SN9C102_V4L2_CID_PIXEL_BIAS_VOLTAGE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "pixel bias voltage", .minimum = 0x00, .maximum = 0x07, .step = 0x01, .default_value = 0x02, .flags = 0, }, }, .get_ctrl = &hv7131d_get_ctrl, .set_ctrl = &hv7131d_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 640, .height = 480, }, .defrect = { .left = 0, .top = 0, .width = 640, .height = 480, }, }, .set_crop = &hv7131d_set_crop, .pix_format = { .width = 640, .height = 480, .pixelformat = V4L2_PIX_FMT_SBGGR8, .priv = 8, }, .set_pix_format = &hv7131d_set_pix_format }; int sn9c102_probe_hv7131d(struct sn9c102_device* cam) { int r0 = 0, r1 = 0, err; err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x00, 0x01}, {0x28, 0x17}); r0 = sn9c102_i2c_try_read(cam, &hv7131d, 0x00); r1 = sn9c102_i2c_try_read(cam, &hv7131d, 0x01); if (err || r0 < 0 || r1 < 0) return -EIO; if ((r0 != 0x00 && r0 != 0x01) || r1 != 0x04) return -ENODEV; sn9c102_attach_sensor(cam, &hv7131d); return 0; }
gpl-2.0
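In the sensor plug-in above, hv7131d_get_ctrl() and hv7131d_set_ctrl() translate the red/green/blue balance controls to and from the sensor registers through a 0x3f complement (register = 0x3f - value, value = 0x3f - register), so the mapping is its own inverse and a get after a set returns the value that was written. The sketch below demonstrates that round trip; demo_reg and the demo_* functions are hypothetical stand-ins for the I2C register accesses, and the masking in the setter is added here only to keep the demo value in range.

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for the sensor balance register reached over
 * I2C in the driver above. */
static uint8_t demo_reg;

static void demo_set_balance(uint8_t value)
{
	demo_reg = 0x3f - (value & 0x3f);	/* as in hv7131d_set_ctrl() */
}

static uint8_t demo_get_balance(void)
{
	return 0x3f - (demo_reg & 0x3f);	/* as in hv7131d_get_ctrl() */
}

int main(void)
{
	uint8_t v;

	/* The 0x3f complement is its own inverse, so every value in the
	 * control range round-trips through the register unchanged. */
	for (v = 0x00; v <= 0x3f; v++) {
		demo_set_balance(v);
		assert(demo_get_balance() == v);
	}
	return 0;
}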
ISTweak/android_kernel_samsung_msm8660
sound/pci/vx222/vx222_ops.c
12535
35542
/* * Driver for Digigram VX222 V2/Mic soundcards * * VX222-specific low-level routines * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/control.h> #include <sound/tlv.h> #include <asm/io.h> #include "vx222.h" static int vx2_reg_offset[VX_REG_MAX] = { [VX_ICR] = 0x00, [VX_CVR] = 0x04, [VX_ISR] = 0x08, [VX_IVR] = 0x0c, [VX_RXH] = 0x14, [VX_RXM] = 0x18, [VX_RXL] = 0x1c, [VX_DMA] = 0x10, [VX_CDSP] = 0x20, [VX_CFG] = 0x24, [VX_RUER] = 0x28, [VX_DATA] = 0x2c, [VX_STATUS] = 0x30, [VX_LOFREQ] = 0x34, [VX_HIFREQ] = 0x38, [VX_CSUER] = 0x3c, [VX_SELMIC] = 0x40, [VX_COMPOT] = 0x44, // Write: POTENTIOMETER ; Read: COMPRESSION LEVEL activate [VX_SCOMPR] = 0x48, // Read: COMPRESSION THRESHOLD activate [VX_GLIMIT] = 0x4c, // Read: LEVEL LIMITATION activate [VX_INTCSR] = 0x4c, // VX_INTCSR_REGISTER_OFFSET [VX_CNTRL] = 0x50, // VX_CNTRL_REGISTER_OFFSET [VX_GPIOC] = 0x54, // VX_GPIOC (new with PLX9030) }; static int vx2_reg_index[VX_REG_MAX] = { [VX_ICR] = 1, [VX_CVR] = 1, [VX_ISR] = 1, [VX_IVR] = 1, [VX_RXH] = 1, [VX_RXM] = 1, [VX_RXL] = 1, [VX_DMA] = 1, [VX_CDSP] = 1, [VX_CFG] = 1, [VX_RUER] = 1, [VX_DATA] = 1, [VX_STATUS] = 1, [VX_LOFREQ] = 1, [VX_HIFREQ] = 1, [VX_CSUER] = 1, [VX_SELMIC] = 1, [VX_COMPOT] = 1, [VX_SCOMPR] = 1, [VX_GLIMIT] = 1, [VX_INTCSR] = 0, /* on the PLX */ [VX_CNTRL] = 0, /* on the PLX */ [VX_GPIOC] = 0, /* on the PLX */ }; static inline unsigned long vx2_reg_addr(struct vx_core *_chip, int reg) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; return chip->port[vx2_reg_index[reg]] + vx2_reg_offset[reg]; } /** * snd_vx_inb - read a byte from the register * @offset: register enum */ static unsigned char vx2_inb(struct vx_core *chip, int offset) { return inb(vx2_reg_addr(chip, offset)); } /** * snd_vx_outb - write a byte on the register * @offset: the register offset * @val: the value to write */ static void vx2_outb(struct vx_core *chip, int offset, unsigned char val) { outb(val, vx2_reg_addr(chip, offset)); /* printk(KERN_DEBUG "outb: %x -> %x\n", val, vx2_reg_addr(chip, offset)); */ } /** * snd_vx_inl - read a 32bit word from the register * @offset: register enum */ static unsigned int vx2_inl(struct vx_core *chip, int offset) { return inl(vx2_reg_addr(chip, offset)); } /** * snd_vx_outl - write a 32bit word on the register * @offset: the register enum * @val: the value to write */ static void vx2_outl(struct vx_core *chip, int offset, unsigned int val) { /* printk(KERN_DEBUG "outl: %x -> %x\n", val, vx2_reg_addr(chip, offset)); */ outl(val, vx2_reg_addr(chip, offset)); } /* * redefine macros to call directly */ #undef vx_inb #define vx_inb(chip,reg) vx2_inb((struct vx_core*)(chip), VX_##reg) #undef vx_outb #define vx_outb(chip,reg,val) 
vx2_outb((struct vx_core*)(chip), VX_##reg, val) #undef vx_inl #define vx_inl(chip,reg) vx2_inl((struct vx_core*)(chip), VX_##reg) #undef vx_outl #define vx_outl(chip,reg,val) vx2_outl((struct vx_core*)(chip), VX_##reg, val) /* * vx_reset_dsp - reset the DSP */ #define XX_DSP_RESET_WAIT_TIME 2 /* ms */ static void vx2_reset_dsp(struct vx_core *_chip) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; /* set the reset dsp bit to 0 */ vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_DSP_RESET_MASK); mdelay(XX_DSP_RESET_WAIT_TIME); chip->regCDSP |= VX_CDSP_DSP_RESET_MASK; /* set the reset dsp bit to 1 */ vx_outl(chip, CDSP, chip->regCDSP); } static int vx2_test_xilinx(struct vx_core *_chip) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; unsigned int data; snd_printdd("testing xilinx...\n"); /* This test uses several write/read sequences on TEST0 and TEST1 bits * to figure out whever or not the xilinx was correctly loaded */ /* We write 1 on CDSP.TEST0. We should get 0 on STATUS.TEST0. */ vx_outl(chip, CDSP, chip->regCDSP | VX_CDSP_TEST0_MASK); vx_inl(chip, ISR); data = vx_inl(chip, STATUS); if ((data & VX_STATUS_VAL_TEST0_MASK) == VX_STATUS_VAL_TEST0_MASK) { snd_printdd("bad!\n"); return -ENODEV; } /* We write 0 on CDSP.TEST0. We should get 1 on STATUS.TEST0. */ vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_TEST0_MASK); vx_inl(chip, ISR); data = vx_inl(chip, STATUS); if (! (data & VX_STATUS_VAL_TEST0_MASK)) { snd_printdd("bad! #2\n"); return -ENODEV; } if (_chip->type == VX_TYPE_BOARD) { /* not implemented on VX_2_BOARDS */ /* We write 1 on CDSP.TEST1. We should get 0 on STATUS.TEST1. */ vx_outl(chip, CDSP, chip->regCDSP | VX_CDSP_TEST1_MASK); vx_inl(chip, ISR); data = vx_inl(chip, STATUS); if ((data & VX_STATUS_VAL_TEST1_MASK) == VX_STATUS_VAL_TEST1_MASK) { snd_printdd("bad! #3\n"); return -ENODEV; } /* We write 0 on CDSP.TEST1. We should get 1 on STATUS.TEST1. */ vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_TEST1_MASK); vx_inl(chip, ISR); data = vx_inl(chip, STATUS); if (! (data & VX_STATUS_VAL_TEST1_MASK)) { snd_printdd("bad! #4\n"); return -ENODEV; } } snd_printdd("ok, xilinx fine.\n"); return 0; } /** * vx_setup_pseudo_dma - set up the pseudo dma read/write mode. * @do_write: 0 = read, 1 = set up for DMA write */ static void vx2_setup_pseudo_dma(struct vx_core *chip, int do_write) { /* Interrupt mode and HREQ pin enabled for host transmit data transfers * (in case of the use of the pseudo-dma facility). */ vx_outl(chip, ICR, do_write ? ICR_TREQ : ICR_RREQ); /* Reset the pseudo-dma register (in case of the use of the * pseudo-dma facility). */ vx_outl(chip, RESET_DMA, 0); } /* * vx_release_pseudo_dma - disable the pseudo-DMA mode */ static inline void vx2_release_pseudo_dma(struct vx_core *chip) { /* HREQ pin disabled. */ vx_outl(chip, ICR, 0); } /* pseudo-dma write */ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime, struct vx_pipe *pipe, int count) { unsigned long port = vx2_reg_addr(chip, VX_DMA); int offset = pipe->hw_ptr; u32 *addr = (u32 *)(runtime->dma_area + offset); if (snd_BUG_ON(count % 4)) return; vx2_setup_pseudo_dma(chip, 1); /* Transfer using pseudo-dma. */ if (offset + count > pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ while (length-- > 0) { outl(cpu_to_le32(*addr), port); addr++; } addr = (u32 *)runtime->dma_area; pipe->hw_ptr = 0; } pipe->hw_ptr += count; count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. 
*/ while (count-- > 0) { outl(cpu_to_le32(*addr), port); addr++; } vx2_release_pseudo_dma(chip); } /* pseudo dma read */ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime, struct vx_pipe *pipe, int count) { int offset = pipe->hw_ptr; u32 *addr = (u32 *)(runtime->dma_area + offset); unsigned long port = vx2_reg_addr(chip, VX_DMA); if (snd_BUG_ON(count % 4)) return; vx2_setup_pseudo_dma(chip, 0); /* Transfer using pseudo-dma. */ if (offset + count > pipe->buffer_bytes) { int length = pipe->buffer_bytes - offset; count -= length; length >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ while (length-- > 0) *addr++ = le32_to_cpu(inl(port)); addr = (u32 *)runtime->dma_area; pipe->hw_ptr = 0; } pipe->hw_ptr += count; count >>= 2; /* in 32bit words */ /* Transfer using pseudo-dma. */ while (count-- > 0) *addr++ = le32_to_cpu(inl(port)); vx2_release_pseudo_dma(chip); } #define VX_XILINX_RESET_MASK 0x40000000 #define VX_USERBIT0_MASK 0x00000004 #define VX_USERBIT1_MASK 0x00000020 #define VX_CNTRL_REGISTER_VALUE 0x00172012 /* * transfer counts bits to PLX */ static int put_xilinx_data(struct vx_core *chip, unsigned int port, unsigned int counts, unsigned char data) { unsigned int i; for (i = 0; i < counts; i++) { unsigned int val; /* set the clock bit to 0. */ val = VX_CNTRL_REGISTER_VALUE & ~VX_USERBIT0_MASK; vx2_outl(chip, port, val); vx2_inl(chip, port); udelay(1); if (data & (1 << i)) val |= VX_USERBIT1_MASK; else val &= ~VX_USERBIT1_MASK; vx2_outl(chip, port, val); vx2_inl(chip, port); /* set the clock bit to 1. */ val |= VX_USERBIT0_MASK; vx2_outl(chip, port, val); vx2_inl(chip, port); udelay(1); } return 0; } /* * load the xilinx image */ static int vx2_load_xilinx_binary(struct vx_core *chip, const struct firmware *xilinx) { unsigned int i; unsigned int port; const unsigned char *image; /* XILINX reset (wait at least 1 millisecond between reset on and off). */ vx_outl(chip, CNTRL, VX_CNTRL_REGISTER_VALUE | VX_XILINX_RESET_MASK); vx_inl(chip, CNTRL); msleep(10); vx_outl(chip, CNTRL, VX_CNTRL_REGISTER_VALUE); vx_inl(chip, CNTRL); msleep(10); if (chip->type == VX_TYPE_BOARD) port = VX_CNTRL; else port = VX_GPIOC; /* VX222 V2 and VX222_MIC_BOARD with new PLX9030 use this register */ image = xilinx->data; for (i = 0; i < xilinx->size; i++, image++) { if (put_xilinx_data(chip, port, 8, *image) < 0) return -EINVAL; /* don't take too much time in this loop... */ cond_resched(); } put_xilinx_data(chip, port, 4, 0xff); /* end signature */ msleep(200); /* test after loading (is buggy with VX222) */ if (chip->type != VX_TYPE_BOARD) { /* Test if load successful: test bit 8 of register GPIOC (VX222: use CNTRL) ! */ i = vx_inl(chip, GPIOC); if (i & 0x0100) return 0; snd_printk(KERN_ERR "vx222: xilinx test failed after load, GPIOC=0x%x\n", i); return -EINVAL; } return 0; } /* * load the boot/dsp images */ static int vx2_load_dsp(struct vx_core *vx, int index, const struct firmware *dsp) { int err; switch (index) { case 1: /* xilinx image */ if ((err = vx2_load_xilinx_binary(vx, dsp)) < 0) return err; if ((err = vx2_test_xilinx(vx)) < 0) return err; return 0; case 2: /* DSP boot */ return snd_vx_dsp_boot(vx, dsp); case 3: /* DSP image */ return snd_vx_dsp_load(vx, dsp); default: snd_BUG(); return -EINVAL; } } /* * vx_test_and_ack - test and acknowledge interrupt * * called from irq hander, too * * spinlock held! */ static int vx2_test_and_ack(struct vx_core *chip) { /* not booted yet? */ if (! (chip->chip_status & VX_STAT_XILINX_LOADED)) return -ENXIO; if (! 
(vx_inl(chip, STATUS) & VX_STATUS_MEMIRQ_MASK)) return -EIO; /* ok, interrupts generated, now ack it */ /* set ACQUIT bit up and down */ vx_outl(chip, STATUS, 0); /* useless read just to spend some time and maintain * the ACQUIT signal up for a while ( a bus cycle ) */ vx_inl(chip, STATUS); /* ack */ vx_outl(chip, STATUS, VX_STATUS_MEMIRQ_MASK); /* useless read just to spend some time and maintain * the ACQUIT signal up for a while ( a bus cycle ) */ vx_inl(chip, STATUS); /* clear */ vx_outl(chip, STATUS, 0); return 0; } /* * vx_validate_irq - enable/disable IRQ */ static void vx2_validate_irq(struct vx_core *_chip, int enable) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; /* Set the interrupt enable bit to 1 in CDSP register */ if (enable) { /* Set the PCI interrupt enable bit to 1.*/ vx_outl(chip, INTCSR, VX_INTCSR_VALUE|VX_PCI_INTERRUPT_MASK); chip->regCDSP |= VX_CDSP_VALID_IRQ_MASK; } else { /* Set the PCI interrupt enable bit to 0. */ vx_outl(chip, INTCSR, VX_INTCSR_VALUE&~VX_PCI_INTERRUPT_MASK); chip->regCDSP &= ~VX_CDSP_VALID_IRQ_MASK; } vx_outl(chip, CDSP, chip->regCDSP); } /* * write an AKM codec data (24bit) */ static void vx2_write_codec_reg(struct vx_core *chip, unsigned int data) { unsigned int i; vx_inl(chip, HIFREQ); /* We have to send 24 bits (3 x 8 bits). Start with most signif. Bit */ for (i = 0; i < 24; i++, data <<= 1) vx_outl(chip, DATA, ((data & 0x800000) ? VX_DATA_CODEC_MASK : 0)); /* Terminate access to codec registers */ vx_inl(chip, RUER); } #define AKM_CODEC_POWER_CONTROL_CMD 0xA007 #define AKM_CODEC_RESET_ON_CMD 0xA100 #define AKM_CODEC_RESET_OFF_CMD 0xA103 #define AKM_CODEC_CLOCK_FORMAT_CMD 0xA240 #define AKM_CODEC_MUTE_CMD 0xA38D #define AKM_CODEC_UNMUTE_CMD 0xA30D #define AKM_CODEC_LEFT_LEVEL_CMD 0xA400 #define AKM_CODEC_RIGHT_LEVEL_CMD 0xA500 static const u8 vx2_akm_gains_lut[VX2_AKM_LEVEL_MAX+1] = { 0x7f, // [000] = +0.000 dB -> AKM(0x7f) = +0.000 dB error(+0.000 dB) 0x7d, // [001] = -0.500 dB -> AKM(0x7d) = -0.572 dB error(-0.072 dB) 0x7c, // [002] = -1.000 dB -> AKM(0x7c) = -0.873 dB error(+0.127 dB) 0x7a, // [003] = -1.500 dB -> AKM(0x7a) = -1.508 dB error(-0.008 dB) 0x79, // [004] = -2.000 dB -> AKM(0x79) = -1.844 dB error(+0.156 dB) 0x77, // [005] = -2.500 dB -> AKM(0x77) = -2.557 dB error(-0.057 dB) 0x76, // [006] = -3.000 dB -> AKM(0x76) = -2.937 dB error(+0.063 dB) 0x75, // [007] = -3.500 dB -> AKM(0x75) = -3.334 dB error(+0.166 dB) 0x73, // [008] = -4.000 dB -> AKM(0x73) = -4.188 dB error(-0.188 dB) 0x72, // [009] = -4.500 dB -> AKM(0x72) = -4.648 dB error(-0.148 dB) 0x71, // [010] = -5.000 dB -> AKM(0x71) = -5.134 dB error(-0.134 dB) 0x70, // [011] = -5.500 dB -> AKM(0x70) = -5.649 dB error(-0.149 dB) 0x6f, // [012] = -6.000 dB -> AKM(0x6f) = -6.056 dB error(-0.056 dB) 0x6d, // [013] = -6.500 dB -> AKM(0x6d) = -6.631 dB error(-0.131 dB) 0x6c, // [014] = -7.000 dB -> AKM(0x6c) = -6.933 dB error(+0.067 dB) 0x6a, // [015] = -7.500 dB -> AKM(0x6a) = -7.571 dB error(-0.071 dB) 0x69, // [016] = -8.000 dB -> AKM(0x69) = -7.909 dB error(+0.091 dB) 0x67, // [017] = -8.500 dB -> AKM(0x67) = -8.626 dB error(-0.126 dB) 0x66, // [018] = -9.000 dB -> AKM(0x66) = -9.008 dB error(-0.008 dB) 0x65, // [019] = -9.500 dB -> AKM(0x65) = -9.407 dB error(+0.093 dB) 0x64, // [020] = -10.000 dB -> AKM(0x64) = -9.826 dB error(+0.174 dB) 0x62, // [021] = -10.500 dB -> AKM(0x62) = -10.730 dB error(-0.230 dB) 0x61, // [022] = -11.000 dB -> AKM(0x61) = -11.219 dB error(-0.219 dB) 0x60, // [023] = -11.500 dB -> AKM(0x60) = -11.738 dB error(-0.238 dB) 0x5f, // [024] = 
-12.000 dB -> AKM(0x5f) = -12.149 dB error(-0.149 dB) 0x5e, // [025] = -12.500 dB -> AKM(0x5e) = -12.434 dB error(+0.066 dB) 0x5c, // [026] = -13.000 dB -> AKM(0x5c) = -13.033 dB error(-0.033 dB) 0x5b, // [027] = -13.500 dB -> AKM(0x5b) = -13.350 dB error(+0.150 dB) 0x59, // [028] = -14.000 dB -> AKM(0x59) = -14.018 dB error(-0.018 dB) 0x58, // [029] = -14.500 dB -> AKM(0x58) = -14.373 dB error(+0.127 dB) 0x56, // [030] = -15.000 dB -> AKM(0x56) = -15.130 dB error(-0.130 dB) 0x55, // [031] = -15.500 dB -> AKM(0x55) = -15.534 dB error(-0.034 dB) 0x54, // [032] = -16.000 dB -> AKM(0x54) = -15.958 dB error(+0.042 dB) 0x53, // [033] = -16.500 dB -> AKM(0x53) = -16.404 dB error(+0.096 dB) 0x52, // [034] = -17.000 dB -> AKM(0x52) = -16.874 dB error(+0.126 dB) 0x51, // [035] = -17.500 dB -> AKM(0x51) = -17.371 dB error(+0.129 dB) 0x50, // [036] = -18.000 dB -> AKM(0x50) = -17.898 dB error(+0.102 dB) 0x4e, // [037] = -18.500 dB -> AKM(0x4e) = -18.605 dB error(-0.105 dB) 0x4d, // [038] = -19.000 dB -> AKM(0x4d) = -18.905 dB error(+0.095 dB) 0x4b, // [039] = -19.500 dB -> AKM(0x4b) = -19.538 dB error(-0.038 dB) 0x4a, // [040] = -20.000 dB -> AKM(0x4a) = -19.872 dB error(+0.128 dB) 0x48, // [041] = -20.500 dB -> AKM(0x48) = -20.583 dB error(-0.083 dB) 0x47, // [042] = -21.000 dB -> AKM(0x47) = -20.961 dB error(+0.039 dB) 0x46, // [043] = -21.500 dB -> AKM(0x46) = -21.356 dB error(+0.144 dB) 0x44, // [044] = -22.000 dB -> AKM(0x44) = -22.206 dB error(-0.206 dB) 0x43, // [045] = -22.500 dB -> AKM(0x43) = -22.664 dB error(-0.164 dB) 0x42, // [046] = -23.000 dB -> AKM(0x42) = -23.147 dB error(-0.147 dB) 0x41, // [047] = -23.500 dB -> AKM(0x41) = -23.659 dB error(-0.159 dB) 0x40, // [048] = -24.000 dB -> AKM(0x40) = -24.203 dB error(-0.203 dB) 0x3f, // [049] = -24.500 dB -> AKM(0x3f) = -24.635 dB error(-0.135 dB) 0x3e, // [050] = -25.000 dB -> AKM(0x3e) = -24.935 dB error(+0.065 dB) 0x3c, // [051] = -25.500 dB -> AKM(0x3c) = -25.569 dB error(-0.069 dB) 0x3b, // [052] = -26.000 dB -> AKM(0x3b) = -25.904 dB error(+0.096 dB) 0x39, // [053] = -26.500 dB -> AKM(0x39) = -26.615 dB error(-0.115 dB) 0x38, // [054] = -27.000 dB -> AKM(0x38) = -26.994 dB error(+0.006 dB) 0x37, // [055] = -27.500 dB -> AKM(0x37) = -27.390 dB error(+0.110 dB) 0x36, // [056] = -28.000 dB -> AKM(0x36) = -27.804 dB error(+0.196 dB) 0x34, // [057] = -28.500 dB -> AKM(0x34) = -28.699 dB error(-0.199 dB) 0x33, // [058] = -29.000 dB -> AKM(0x33) = -29.183 dB error(-0.183 dB) 0x32, // [059] = -29.500 dB -> AKM(0x32) = -29.696 dB error(-0.196 dB) 0x31, // [060] = -30.000 dB -> AKM(0x31) = -30.241 dB error(-0.241 dB) 0x31, // [061] = -30.500 dB -> AKM(0x31) = -30.241 dB error(+0.259 dB) 0x30, // [062] = -31.000 dB -> AKM(0x30) = -30.823 dB error(+0.177 dB) 0x2e, // [063] = -31.500 dB -> AKM(0x2e) = -31.610 dB error(-0.110 dB) 0x2d, // [064] = -32.000 dB -> AKM(0x2d) = -31.945 dB error(+0.055 dB) 0x2b, // [065] = -32.500 dB -> AKM(0x2b) = -32.659 dB error(-0.159 dB) 0x2a, // [066] = -33.000 dB -> AKM(0x2a) = -33.038 dB error(-0.038 dB) 0x29, // [067] = -33.500 dB -> AKM(0x29) = -33.435 dB error(+0.065 dB) 0x28, // [068] = -34.000 dB -> AKM(0x28) = -33.852 dB error(+0.148 dB) 0x27, // [069] = -34.500 dB -> AKM(0x27) = -34.289 dB error(+0.211 dB) 0x25, // [070] = -35.000 dB -> AKM(0x25) = -35.235 dB error(-0.235 dB) 0x24, // [071] = -35.500 dB -> AKM(0x24) = -35.750 dB error(-0.250 dB) 0x24, // [072] = -36.000 dB -> AKM(0x24) = -35.750 dB error(+0.250 dB) 0x23, // [073] = -36.500 dB -> AKM(0x23) = -36.297 dB error(+0.203 dB) 0x22, // [074] = 
-37.000 dB -> AKM(0x22) = -36.881 dB error(+0.119 dB) 0x21, // [075] = -37.500 dB -> AKM(0x21) = -37.508 dB error(-0.008 dB) 0x20, // [076] = -38.000 dB -> AKM(0x20) = -38.183 dB error(-0.183 dB) 0x1f, // [077] = -38.500 dB -> AKM(0x1f) = -38.726 dB error(-0.226 dB) 0x1e, // [078] = -39.000 dB -> AKM(0x1e) = -39.108 dB error(-0.108 dB) 0x1d, // [079] = -39.500 dB -> AKM(0x1d) = -39.507 dB error(-0.007 dB) 0x1c, // [080] = -40.000 dB -> AKM(0x1c) = -39.926 dB error(+0.074 dB) 0x1b, // [081] = -40.500 dB -> AKM(0x1b) = -40.366 dB error(+0.134 dB) 0x1a, // [082] = -41.000 dB -> AKM(0x1a) = -40.829 dB error(+0.171 dB) 0x19, // [083] = -41.500 dB -> AKM(0x19) = -41.318 dB error(+0.182 dB) 0x18, // [084] = -42.000 dB -> AKM(0x18) = -41.837 dB error(+0.163 dB) 0x17, // [085] = -42.500 dB -> AKM(0x17) = -42.389 dB error(+0.111 dB) 0x16, // [086] = -43.000 dB -> AKM(0x16) = -42.978 dB error(+0.022 dB) 0x15, // [087] = -43.500 dB -> AKM(0x15) = -43.610 dB error(-0.110 dB) 0x14, // [088] = -44.000 dB -> AKM(0x14) = -44.291 dB error(-0.291 dB) 0x14, // [089] = -44.500 dB -> AKM(0x14) = -44.291 dB error(+0.209 dB) 0x13, // [090] = -45.000 dB -> AKM(0x13) = -45.031 dB error(-0.031 dB) 0x12, // [091] = -45.500 dB -> AKM(0x12) = -45.840 dB error(-0.340 dB) 0x12, // [092] = -46.000 dB -> AKM(0x12) = -45.840 dB error(+0.160 dB) 0x11, // [093] = -46.500 dB -> AKM(0x11) = -46.731 dB error(-0.231 dB) 0x11, // [094] = -47.000 dB -> AKM(0x11) = -46.731 dB error(+0.269 dB) 0x10, // [095] = -47.500 dB -> AKM(0x10) = -47.725 dB error(-0.225 dB) 0x10, // [096] = -48.000 dB -> AKM(0x10) = -47.725 dB error(+0.275 dB) 0x0f, // [097] = -48.500 dB -> AKM(0x0f) = -48.553 dB error(-0.053 dB) 0x0e, // [098] = -49.000 dB -> AKM(0x0e) = -49.152 dB error(-0.152 dB) 0x0d, // [099] = -49.500 dB -> AKM(0x0d) = -49.796 dB error(-0.296 dB) 0x0d, // [100] = -50.000 dB -> AKM(0x0d) = -49.796 dB error(+0.204 dB) 0x0c, // [101] = -50.500 dB -> AKM(0x0c) = -50.491 dB error(+0.009 dB) 0x0b, // [102] = -51.000 dB -> AKM(0x0b) = -51.247 dB error(-0.247 dB) 0x0b, // [103] = -51.500 dB -> AKM(0x0b) = -51.247 dB error(+0.253 dB) 0x0a, // [104] = -52.000 dB -> AKM(0x0a) = -52.075 dB error(-0.075 dB) 0x0a, // [105] = -52.500 dB -> AKM(0x0a) = -52.075 dB error(+0.425 dB) 0x09, // [106] = -53.000 dB -> AKM(0x09) = -52.990 dB error(+0.010 dB) 0x09, // [107] = -53.500 dB -> AKM(0x09) = -52.990 dB error(+0.510 dB) 0x08, // [108] = -54.000 dB -> AKM(0x08) = -54.013 dB error(-0.013 dB) 0x08, // [109] = -54.500 dB -> AKM(0x08) = -54.013 dB error(+0.487 dB) 0x07, // [110] = -55.000 dB -> AKM(0x07) = -55.173 dB error(-0.173 dB) 0x07, // [111] = -55.500 dB -> AKM(0x07) = -55.173 dB error(+0.327 dB) 0x06, // [112] = -56.000 dB -> AKM(0x06) = -56.512 dB error(-0.512 dB) 0x06, // [113] = -56.500 dB -> AKM(0x06) = -56.512 dB error(-0.012 dB) 0x06, // [114] = -57.000 dB -> AKM(0x06) = -56.512 dB error(+0.488 dB) 0x05, // [115] = -57.500 dB -> AKM(0x05) = -58.095 dB error(-0.595 dB) 0x05, // [116] = -58.000 dB -> AKM(0x05) = -58.095 dB error(-0.095 dB) 0x05, // [117] = -58.500 dB -> AKM(0x05) = -58.095 dB error(+0.405 dB) 0x05, // [118] = -59.000 dB -> AKM(0x05) = -58.095 dB error(+0.905 dB) 0x04, // [119] = -59.500 dB -> AKM(0x04) = -60.034 dB error(-0.534 dB) 0x04, // [120] = -60.000 dB -> AKM(0x04) = -60.034 dB error(-0.034 dB) 0x04, // [121] = -60.500 dB -> AKM(0x04) = -60.034 dB error(+0.466 dB) 0x04, // [122] = -61.000 dB -> AKM(0x04) = -60.034 dB error(+0.966 dB) 0x03, // [123] = -61.500 dB -> AKM(0x03) = -62.532 dB error(-1.032 dB) 0x03, // [124] = 
-62.000 dB -> AKM(0x03) = -62.532 dB error(-0.532 dB) 0x03, // [125] = -62.500 dB -> AKM(0x03) = -62.532 dB error(-0.032 dB) 0x03, // [126] = -63.000 dB -> AKM(0x03) = -62.532 dB error(+0.468 dB) 0x03, // [127] = -63.500 dB -> AKM(0x03) = -62.532 dB error(+0.968 dB) 0x03, // [128] = -64.000 dB -> AKM(0x03) = -62.532 dB error(+1.468 dB) 0x02, // [129] = -64.500 dB -> AKM(0x02) = -66.054 dB error(-1.554 dB) 0x02, // [130] = -65.000 dB -> AKM(0x02) = -66.054 dB error(-1.054 dB) 0x02, // [131] = -65.500 dB -> AKM(0x02) = -66.054 dB error(-0.554 dB) 0x02, // [132] = -66.000 dB -> AKM(0x02) = -66.054 dB error(-0.054 dB) 0x02, // [133] = -66.500 dB -> AKM(0x02) = -66.054 dB error(+0.446 dB) 0x02, // [134] = -67.000 dB -> AKM(0x02) = -66.054 dB error(+0.946 dB) 0x02, // [135] = -67.500 dB -> AKM(0x02) = -66.054 dB error(+1.446 dB) 0x02, // [136] = -68.000 dB -> AKM(0x02) = -66.054 dB error(+1.946 dB) 0x02, // [137] = -68.500 dB -> AKM(0x02) = -66.054 dB error(+2.446 dB) 0x02, // [138] = -69.000 dB -> AKM(0x02) = -66.054 dB error(+2.946 dB) 0x01, // [139] = -69.500 dB -> AKM(0x01) = -72.075 dB error(-2.575 dB) 0x01, // [140] = -70.000 dB -> AKM(0x01) = -72.075 dB error(-2.075 dB) 0x01, // [141] = -70.500 dB -> AKM(0x01) = -72.075 dB error(-1.575 dB) 0x01, // [142] = -71.000 dB -> AKM(0x01) = -72.075 dB error(-1.075 dB) 0x01, // [143] = -71.500 dB -> AKM(0x01) = -72.075 dB error(-0.575 dB) 0x01, // [144] = -72.000 dB -> AKM(0x01) = -72.075 dB error(-0.075 dB) 0x01, // [145] = -72.500 dB -> AKM(0x01) = -72.075 dB error(+0.425 dB) 0x01, // [146] = -73.000 dB -> AKM(0x01) = -72.075 dB error(+0.925 dB) 0x00}; // [147] = -73.500 dB -> AKM(0x00) = mute error(+infini) /* * pseudo-codec write entry */ static void vx2_write_akm(struct vx_core *chip, int reg, unsigned int data) { unsigned int val; if (reg == XX_CODEC_DAC_CONTROL_REGISTER) { vx2_write_codec_reg(chip, data ? AKM_CODEC_MUTE_CMD : AKM_CODEC_UNMUTE_CMD); return; } /* `data' is a value between 0x0 and VX2_AKM_LEVEL_MAX = 0x093, in the case of the AKM codecs, we need a look up table, as there is no linear matching between the driver codec values and the real dBu value */ if (snd_BUG_ON(data >= sizeof(vx2_akm_gains_lut))) return; switch (reg) { case XX_CODEC_LEVEL_LEFT_REGISTER: val = AKM_CODEC_LEFT_LEVEL_CMD; break; case XX_CODEC_LEVEL_RIGHT_REGISTER: val = AKM_CODEC_RIGHT_LEVEL_CMD; break; default: snd_BUG(); return; } val |= vx2_akm_gains_lut[data]; vx2_write_codec_reg(chip, val); } /* * write codec bit for old VX222 board */ static void vx2_old_write_codec_bit(struct vx_core *chip, int codec, unsigned int data) { int i; /* activate access to codec registers */ vx_inl(chip, HIFREQ); for (i = 0; i < 24; i++, data <<= 1) vx_outl(chip, DATA, ((data & 0x800000) ? VX_DATA_CODEC_MASK : 0)); /* Terminate access to codec registers */ vx_inl(chip, RUER); } /* * reset codec bit */ static void vx2_reset_codec(struct vx_core *_chip) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; /* Set the reset CODEC bit to 0. */ vx_outl(chip, CDSP, chip->regCDSP &~ VX_CDSP_CODEC_RESET_MASK); vx_inl(chip, CDSP); msleep(10); /* Set the reset CODEC bit to 1. 
*/ chip->regCDSP |= VX_CDSP_CODEC_RESET_MASK; vx_outl(chip, CDSP, chip->regCDSP); vx_inl(chip, CDSP); if (_chip->type == VX_TYPE_BOARD) { msleep(1); return; } msleep(5); /* additionnel wait time for AKM's */ vx2_write_codec_reg(_chip, AKM_CODEC_POWER_CONTROL_CMD); /* DAC power up, ADC power up, Vref power down */ vx2_write_codec_reg(_chip, AKM_CODEC_CLOCK_FORMAT_CMD); /* default */ vx2_write_codec_reg(_chip, AKM_CODEC_MUTE_CMD); /* Mute = ON ,Deemphasis = OFF */ vx2_write_codec_reg(_chip, AKM_CODEC_RESET_OFF_CMD); /* DAC and ADC normal operation */ if (_chip->type == VX_TYPE_MIC) { /* set up the micro input selector */ chip->regSELMIC = MICRO_SELECT_INPUT_NORM | MICRO_SELECT_PREAMPLI_G_0 | MICRO_SELECT_NOISE_T_52DB; /* reset phantom power supply */ chip->regSELMIC &= ~MICRO_SELECT_PHANTOM_ALIM; vx_outl(_chip, SELMIC, chip->regSELMIC); } } /* * change the audio source */ static void vx2_change_audio_source(struct vx_core *_chip, int src) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; switch (src) { case VX_AUDIO_SRC_DIGITAL: chip->regCFG |= VX_CFG_DATAIN_SEL_MASK; break; default: chip->regCFG &= ~VX_CFG_DATAIN_SEL_MASK; break; } vx_outl(chip, CFG, chip->regCFG); } /* * set the clock source */ static void vx2_set_clock_source(struct vx_core *_chip, int source) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; if (source == INTERNAL_QUARTZ) chip->regCFG &= ~VX_CFG_CLOCKIN_SEL_MASK; else chip->regCFG |= VX_CFG_CLOCKIN_SEL_MASK; vx_outl(chip, CFG, chip->regCFG); } /* * reset the board */ static void vx2_reset_board(struct vx_core *_chip, int cold_reset) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; /* initialize the register values */ chip->regCDSP = VX_CDSP_CODEC_RESET_MASK | VX_CDSP_DSP_RESET_MASK ; chip->regCFG = 0; } /* * input level controls for VX222 Mic */ /* Micro level is specified to be adjustable from -96dB to 63 dB (board coded 0x00 ... 318), * 318 = 210 + 36 + 36 + 36 (210 = +9dB variable) (3 * 36 = 3 steps of 18dB pre ampli) * as we will mute if less than -110dB, so let's simply use line input coded levels and add constant offset ! */ #define V2_MICRO_LEVEL_RANGE (318 - 255) static void vx2_set_input_level(struct snd_vx222 *chip) { int i, miclevel, preamp; unsigned int data; miclevel = chip->mic_level; miclevel += V2_MICRO_LEVEL_RANGE; /* add 318 - 0xff */ preamp = 0; while (miclevel > 210) { /* limitation to +9dB of 3310 real gain */ preamp++; /* raise pre ampli + 18dB */ miclevel -= (18 * 2); /* lower level 18 dB (*2 because of 0.5 dB steps !) */ } if (snd_BUG_ON(preamp >= 4)) return; /* set pre-amp level */ chip->regSELMIC &= ~MICRO_SELECT_PREAMPLI_MASK; chip->regSELMIC |= (preamp << MICRO_SELECT_PREAMPLI_OFFSET) & MICRO_SELECT_PREAMPLI_MASK; vx_outl(chip, SELMIC, chip->regSELMIC); data = (unsigned int)miclevel << 16 | (unsigned int)chip->input_level[1] << 8 | (unsigned int)chip->input_level[0]; vx_inl(chip, DATA); /* Activate input level programming */ /* We have to send 32 bits (4 x 8 bits) */ for (i = 0; i < 32; i++, data <<= 1) vx_outl(chip, DATA, ((data & 0x80000000) ? 
VX_DATA_CODEC_MASK : 0)); vx_inl(chip, RUER); /* Terminate input level programming */ } #define MIC_LEVEL_MAX 0xff static const DECLARE_TLV_DB_SCALE(db_scale_mic, -6450, 50, 0); /* * controls API for input levels */ /* input levels */ static int vx_input_level_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = MIC_LEVEL_MAX; return 0; } static int vx_input_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vx222 *chip = (struct snd_vx222 *)_chip; mutex_lock(&_chip->mixer_mutex); ucontrol->value.integer.value[0] = chip->input_level[0]; ucontrol->value.integer.value[1] = chip->input_level[1]; mutex_unlock(&_chip->mixer_mutex); return 0; } static int vx_input_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vx222 *chip = (struct snd_vx222 *)_chip; if (ucontrol->value.integer.value[0] < 0 || ucontrol->value.integer.value[0] > MIC_LEVEL_MAX) return -EINVAL; if (ucontrol->value.integer.value[1] < 0 || ucontrol->value.integer.value[1] > MIC_LEVEL_MAX) return -EINVAL; mutex_lock(&_chip->mixer_mutex); if (chip->input_level[0] != ucontrol->value.integer.value[0] || chip->input_level[1] != ucontrol->value.integer.value[1]) { chip->input_level[0] = ucontrol->value.integer.value[0]; chip->input_level[1] = ucontrol->value.integer.value[1]; vx2_set_input_level(chip); mutex_unlock(&_chip->mixer_mutex); return 1; } mutex_unlock(&_chip->mixer_mutex); return 0; } /* mic level */ static int vx_mic_level_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = MIC_LEVEL_MAX; return 0; } static int vx_mic_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vx222 *chip = (struct snd_vx222 *)_chip; ucontrol->value.integer.value[0] = chip->mic_level; return 0; } static int vx_mic_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct vx_core *_chip = snd_kcontrol_chip(kcontrol); struct snd_vx222 *chip = (struct snd_vx222 *)_chip; if (ucontrol->value.integer.value[0] < 0 || ucontrol->value.integer.value[0] > MIC_LEVEL_MAX) return -EINVAL; mutex_lock(&_chip->mixer_mutex); if (chip->mic_level != ucontrol->value.integer.value[0]) { chip->mic_level = ucontrol->value.integer.value[0]; vx2_set_input_level(chip); mutex_unlock(&_chip->mixer_mutex); return 1; } mutex_unlock(&_chip->mixer_mutex); return 0; } static struct snd_kcontrol_new vx_control_input_level = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Capture Volume", .info = vx_input_level_info, .get = vx_input_level_get, .put = vx_input_level_put, .tlv = { .p = db_scale_mic }, }; static struct snd_kcontrol_new vx_control_mic_level = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Mic Capture Volume", .info = vx_mic_level_info, .get = vx_mic_level_get, .put = vx_mic_level_put, .tlv = { .p = db_scale_mic }, }; /* * FIXME: compressor/limiter implementation is missing yet... 
*/ static int vx2_add_mic_controls(struct vx_core *_chip) { struct snd_vx222 *chip = (struct snd_vx222 *)_chip; int err; if (_chip->type != VX_TYPE_MIC) return 0; /* mute input levels */ chip->input_level[0] = chip->input_level[1] = 0; chip->mic_level = 0; vx2_set_input_level(chip); /* controls */ if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_input_level, chip))) < 0) return err; if ((err = snd_ctl_add(_chip->card, snd_ctl_new1(&vx_control_mic_level, chip))) < 0) return err; return 0; } /* * callbacks */ struct snd_vx_ops vx222_ops = { .in8 = vx2_inb, .in32 = vx2_inl, .out8 = vx2_outb, .out32 = vx2_outl, .test_and_ack = vx2_test_and_ack, .validate_irq = vx2_validate_irq, .akm_write = vx2_write_akm, .reset_codec = vx2_reset_codec, .change_audio_source = vx2_change_audio_source, .set_clock_source = vx2_set_clock_source, .load_dsp = vx2_load_dsp, .reset_dsp = vx2_reset_dsp, .reset_board = vx2_reset_board, .dma_write = vx2_dma_write, .dma_read = vx2_dma_read, .add_controls = vx2_add_mic_controls, }; /* for old VX222 board */ struct snd_vx_ops vx222_old_ops = { .in8 = vx2_inb, .in32 = vx2_inl, .out8 = vx2_outb, .out32 = vx2_outl, .test_and_ack = vx2_test_and_ack, .validate_irq = vx2_validate_irq, .write_codec = vx2_old_write_codec_bit, .reset_codec = vx2_reset_codec, .change_audio_source = vx2_change_audio_source, .set_clock_source = vx2_set_clock_source, .load_dsp = vx2_load_dsp, .reset_dsp = vx2_reset_dsp, .reset_board = vx2_reset_board, .dma_write = vx2_dma_write, .dma_read = vx2_dma_read, };
gpl-2.0
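A note on the codec access pattern in the VX222 file above: vx2_write_codec_reg() clocks a 24-bit word out MSB-first by shifting the value left and testing bit 23 on each iteration, and vx2_write_akm() builds that word from a 16-bit command constant OR'd with a gain code from the lookup table. The following user-space sketch only reproduces the bit-serialization loop so the bit order can be checked; emit_bit() is a hypothetical stand-in for the vx_outl(chip, DATA, ...) register write, and the mute command value is taken from the table above.

/* Minimal sketch of the MSB-first 24-bit serialization used by
 * vx2_write_codec_reg().  emit_bit() is a hypothetical stand-in for the
 * hardware register write; here it just prints the bit stream.
 */
#include <stdio.h>

#define AKM_CODEC_MUTE_CMD 0xA38D  /* 16-bit command, sits in the low half */

static void emit_bit(int bit)
{
    putchar(bit ? '1' : '0');
}

static void serialize24(unsigned int data)
{
    unsigned int i;

    /* Same loop shape as the driver: shift left, test bit 23 (0x800000). */
    for (i = 0; i < 24; i++, data <<= 1)
        emit_bit((data & 0x800000) ? 1 : 0);
    putchar('\n');
}

int main(void)
{
    serialize24(AKM_CODEC_MUTE_CMD);
    return 0;
}

Running it prints the 24 clocks the driver would issue for the mute command, with the eight leading zero bits that come from placing a 16-bit command in a 24-bit shift word.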
mdr78/Linux-3.8.7-galileo
arch/microblaze/kernel/timer.c
248
7005
/* * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu> * Copyright (C) 2012-2013 Xilinx, Inc. * Copyright (C) 2007-2009 PetaLogix * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/sched_clock.h> #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/cpuinfo.h> static void __iomem *timer_baseaddr; static unsigned int freq_div_hz; static unsigned int timer_clock_freq; #define TCSR0 (0x00) #define TLR0 (0x04) #define TCR0 (0x08) #define TCSR1 (0x10) #define TLR1 (0x14) #define TCR1 (0x18) #define TCSR_MDT (1<<0) #define TCSR_UDT (1<<1) #define TCSR_GENT (1<<2) #define TCSR_CAPT (1<<3) #define TCSR_ARHT (1<<4) #define TCSR_LOAD (1<<5) #define TCSR_ENIT (1<<6) #define TCSR_ENT (1<<7) #define TCSR_TINT (1<<8) #define TCSR_PWMA (1<<9) #define TCSR_ENALL (1<<10) static inline void xilinx_timer0_stop(void) { out_be32(timer_baseaddr + TCSR0, in_be32(timer_baseaddr + TCSR0) & ~TCSR_ENT); } static inline void xilinx_timer0_start_periodic(unsigned long load_val) { if (!load_val) load_val = 1; /* loading value to timer reg */ out_be32(timer_baseaddr + TLR0, load_val); /* load the initial value */ out_be32(timer_baseaddr + TCSR0, TCSR_LOAD); /* see timer data sheet for detail * !ENALL - don't enable 'em all * !PWMA - disable pwm * TINT - clear interrupt status * ENT- enable timer itself * ENIT - enable interrupt * !LOAD - clear the bit to let go * ARHT - auto reload * !CAPT - no external trigger * !GENT - no external signal * UDT - set the timer as down counter * !MDT0 - generate mode */ out_be32(timer_baseaddr + TCSR0, TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT); } static inline void xilinx_timer0_start_oneshot(unsigned long load_val) { if (!load_val) load_val = 1; /* loading value to timer reg */ out_be32(timer_baseaddr + TLR0, load_val); /* load the initial value */ out_be32(timer_baseaddr + TCSR0, TCSR_LOAD); out_be32(timer_baseaddr + TCSR0, TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT); } static int xilinx_timer_set_next_event(unsigned long delta, struct clock_event_device *dev) { pr_debug("%s: next event, delta %x\n", __func__, (u32)delta); xilinx_timer0_start_oneshot(delta); return 0; } static void xilinx_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: pr_info("%s: periodic\n", __func__); xilinx_timer0_start_periodic(freq_div_hz); break; case CLOCK_EVT_MODE_ONESHOT: pr_info("%s: oneshot\n", __func__); break; case CLOCK_EVT_MODE_UNUSED: pr_info("%s: unused\n", __func__); break; case CLOCK_EVT_MODE_SHUTDOWN: pr_info("%s: shutdown\n", __func__); xilinx_timer0_stop(); break; case CLOCK_EVT_MODE_RESUME: pr_info("%s: resume\n", __func__); break; } } static struct clock_event_device clockevent_xilinx_timer = { .name = "xilinx_clockevent", .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, .shift = 8, .rating = 300, .set_next_event = xilinx_timer_set_next_event, .set_mode = xilinx_timer_set_mode, }; static inline void timer_ack(void) { out_be32(timer_baseaddr + TCSR0, in_be32(timer_baseaddr + TCSR0)); } static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &clockevent_xilinx_timer; #ifdef CONFIG_HEART_BEAT heartbeat(); #endif timer_ack(); 
evt->event_handler(evt); return IRQ_HANDLED; } static struct irqaction timer_irqaction = { .handler = timer_interrupt, .flags = IRQF_TIMER, .name = "timer", .dev_id = &clockevent_xilinx_timer, }; static __init void xilinx_clockevent_init(void) { clockevent_xilinx_timer.mult = div_sc(timer_clock_freq, NSEC_PER_SEC, clockevent_xilinx_timer.shift); clockevent_xilinx_timer.max_delta_ns = clockevent_delta2ns((u32)~0, &clockevent_xilinx_timer); clockevent_xilinx_timer.min_delta_ns = clockevent_delta2ns(1, &clockevent_xilinx_timer); clockevent_xilinx_timer.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_xilinx_timer); } static u64 xilinx_clock_read(void) { return in_be32(timer_baseaddr + TCR1); } static cycle_t xilinx_read(struct clocksource *cs) { /* reading actual value of timer 1 */ return (cycle_t)xilinx_clock_read(); } static struct timecounter xilinx_tc = { .cc = NULL, }; static cycle_t xilinx_cc_read(const struct cyclecounter *cc) { return xilinx_read(NULL); } static struct cyclecounter xilinx_cc = { .read = xilinx_cc_read, .mask = CLOCKSOURCE_MASK(32), .shift = 8, }; static int __init init_xilinx_timecounter(void) { xilinx_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC, xilinx_cc.shift); timecounter_init(&xilinx_tc, &xilinx_cc, sched_clock()); return 0; } static struct clocksource clocksource_microblaze = { .name = "xilinx_clocksource", .rating = 300, .read = xilinx_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static int __init xilinx_clocksource_init(void) { if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq)) panic("failed to register clocksource"); /* stop timer1 */ out_be32(timer_baseaddr + TCSR1, in_be32(timer_baseaddr + TCSR1) & ~TCSR_ENT); /* start timer1 - up counting without interrupt */ out_be32(timer_baseaddr + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT); /* register timecounter - for ftrace support */ init_xilinx_timecounter(); return 0; } static void __init xilinx_timer_init(struct device_node *timer) { struct clk *clk; static int initialized; u32 irq; u32 timer_num = 1; if (initialized) return; initialized = 1; timer_baseaddr = of_iomap(timer, 0); if (!timer_baseaddr) { pr_err("ERROR: invalid timer base address\n"); BUG(); } irq = irq_of_parse_and_map(timer, 0); of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num); if (timer_num) { pr_emerg("Please enable two timers in HW\n"); BUG(); } pr_info("%s: irq=%d\n", timer->full_name, irq); clk = of_clk_get(timer, 0); if (IS_ERR(clk)) { pr_err("ERROR: timer CCF input clock not found\n"); /* If there is clock-frequency property than use it */ of_property_read_u32(timer, "clock-frequency", &timer_clock_freq); } else { timer_clock_freq = clk_get_rate(clk); } if (!timer_clock_freq) { pr_err("ERROR: Using CPU clock frequency\n"); timer_clock_freq = cpuinfo.cpu_clock_freq; } freq_div_hz = timer_clock_freq / HZ; setup_irq(irq, &timer_irqaction); #ifdef CONFIG_HEART_BEAT setup_heartbeat(); #endif xilinx_clocksource_init(); xilinx_clockevent_init(); sched_clock_register(xilinx_clock_read, 32, timer_clock_freq); } CLOCKSOURCE_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a", xilinx_timer_init);
gpl-2.0
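The clockevent setup in the MicroBlaze timer code above derives a fixed-point multiplier with div_sc(timer_clock_freq, NSEC_PER_SEC, shift) and later converts tick deltas to nanoseconds via clockevent_delta2ns(). The sketch below mirrors that arithmetic in plain user-space C so the rounding behaviour is visible; the 100 MHz clock is an assumed example value, shift = 8 matches clockevent_xilinx_timer.shift, and the helpers imitate (rather than call) the kernel routines.

/* User-space sketch of the mult/shift arithmetic behind
 * xilinx_clockevent_init().  The 100 MHz timer clock is an assumed example.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same idea as the kernel's div_sc(): mult = (freq << shift) / NSEC_PER_SEC */
static uint32_t calc_mult(uint64_t freq, uint64_t nsec, int shift)
{
    return (uint32_t)((freq << shift) / nsec);
}

/* Roughly what clockevent_delta2ns() computes: ns ~= (ticks << shift) / mult */
static uint64_t delta2ns(uint64_t ticks, uint32_t mult, int shift)
{
    return (ticks << shift) / mult;
}

int main(void)
{
    uint64_t timer_clock_freq = 100000000ULL;  /* assumed 100 MHz input clock */
    int shift = 8;
    uint32_t mult = calc_mult(timer_clock_freq, NSEC_PER_SEC, shift);

    printf("mult = %u\n", mult);
    /* A 1 ms delta expressed in timer ticks, converted back to nanoseconds. */
    printf("1 ms of ticks -> %llu ns\n",
           (unsigned long long)delta2ns(timer_clock_freq / 1000, mult, shift));
    return 0;
}

With these numbers mult truncates from 25.6 to 25, so the round trip comes back as roughly 1.024 ms; the same truncation error is why the real driver keeps the shift small and the rating modest.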
jvaughan/san-francisco-kernel
drivers/i2c/busses/i2c-mv64xxx.c
504
17020
/* * Driver for the i2c controller on the Marvell line of host bridges * (e.g, gt642[46]0, mv643[46]0, mv644[46]0, and Orion SoC family). * * Author: Mark A. Greer <mgreer@mvista.com> * * 2005 (c) MontaVista, Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/mv643xx_i2c.h> #include <linux/platform_device.h> #include <asm/io.h> /* Register defines */ #define MV64XXX_I2C_REG_SLAVE_ADDR 0x00 #define MV64XXX_I2C_REG_DATA 0x04 #define MV64XXX_I2C_REG_CONTROL 0x08 #define MV64XXX_I2C_REG_STATUS 0x0c #define MV64XXX_I2C_REG_BAUD 0x0c #define MV64XXX_I2C_REG_EXT_SLAVE_ADDR 0x10 #define MV64XXX_I2C_REG_SOFT_RESET 0x1c #define MV64XXX_I2C_REG_CONTROL_ACK 0x00000004 #define MV64XXX_I2C_REG_CONTROL_IFLG 0x00000008 #define MV64XXX_I2C_REG_CONTROL_STOP 0x00000010 #define MV64XXX_I2C_REG_CONTROL_START 0x00000020 #define MV64XXX_I2C_REG_CONTROL_TWSIEN 0x00000040 #define MV64XXX_I2C_REG_CONTROL_INTEN 0x00000080 /* Ctlr status values */ #define MV64XXX_I2C_STATUS_BUS_ERR 0x00 #define MV64XXX_I2C_STATUS_MAST_START 0x08 #define MV64XXX_I2C_STATUS_MAST_REPEAT_START 0x10 #define MV64XXX_I2C_STATUS_MAST_WR_ADDR_ACK 0x18 #define MV64XXX_I2C_STATUS_MAST_WR_ADDR_NO_ACK 0x20 #define MV64XXX_I2C_STATUS_MAST_WR_ACK 0x28 #define MV64XXX_I2C_STATUS_MAST_WR_NO_ACK 0x30 #define MV64XXX_I2C_STATUS_MAST_LOST_ARB 0x38 #define MV64XXX_I2C_STATUS_MAST_RD_ADDR_ACK 0x40 #define MV64XXX_I2C_STATUS_MAST_RD_ADDR_NO_ACK 0x48 #define MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK 0x50 #define MV64XXX_I2C_STATUS_MAST_RD_DATA_NO_ACK 0x58 #define MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_ACK 0xd0 #define MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_NO_ACK 0xd8 #define MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_ACK 0xe0 #define MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_NO_ACK 0xe8 #define MV64XXX_I2C_STATUS_NO_STATUS 0xf8 /* Driver states */ enum { MV64XXX_I2C_STATE_INVALID, MV64XXX_I2C_STATE_IDLE, MV64XXX_I2C_STATE_WAITING_FOR_START_COND, MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK, MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK, MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK, MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_DATA, }; /* Driver actions */ enum { MV64XXX_I2C_ACTION_INVALID, MV64XXX_I2C_ACTION_CONTINUE, MV64XXX_I2C_ACTION_SEND_START, MV64XXX_I2C_ACTION_SEND_ADDR_1, MV64XXX_I2C_ACTION_SEND_ADDR_2, MV64XXX_I2C_ACTION_SEND_DATA, MV64XXX_I2C_ACTION_RCV_DATA, MV64XXX_I2C_ACTION_RCV_DATA_STOP, MV64XXX_I2C_ACTION_SEND_STOP, }; struct mv64xxx_i2c_data { int irq; u32 state; u32 action; u32 aborting; u32 cntl_bits; void __iomem *reg_base; u32 reg_base_p; u32 reg_size; u32 addr1; u32 addr2; u32 bytes_left; u32 byte_posn; u32 block; int rc; u32 freq_m; u32 freq_n; wait_queue_head_t waitq; spinlock_t lock; struct i2c_msg *msg; struct i2c_adapter adapter; }; /* ***************************************************************************** * * Finite State Machine & Interrupt Routines * ***************************************************************************** */ /* Reset hardware and initialize FSM */ static void mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data) { writel(0, drv_data->reg_base + MV64XXX_I2C_REG_SOFT_RESET); writel((((drv_data->freq_m & 0xf) << 3) | (drv_data->freq_n & 0x7)), drv_data->reg_base + MV64XXX_I2C_REG_BAUD); writel(0, drv_data->reg_base + MV64XXX_I2C_REG_SLAVE_ADDR); writel(0, 
drv_data->reg_base + MV64XXX_I2C_REG_EXT_SLAVE_ADDR); writel(MV64XXX_I2C_REG_CONTROL_TWSIEN | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->state = MV64XXX_I2C_STATE_IDLE; } static void mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status) { /* * If state is idle, then this is likely the remnants of an old * operation that driver has given up on or the user has killed. * If so, issue the stop condition and go to idle. */ if (drv_data->state == MV64XXX_I2C_STATE_IDLE) { drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; return; } /* The status from the ctlr [mostly] tells us what to do next */ switch (status) { /* Start condition interrupt */ case MV64XXX_I2C_STATUS_MAST_START: /* 0x08 */ case MV64XXX_I2C_STATUS_MAST_REPEAT_START: /* 0x10 */ drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_1; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK; break; /* Performing a write */ case MV64XXX_I2C_STATUS_MAST_WR_ADDR_ACK: /* 0x18 */ if (drv_data->msg->flags & I2C_M_TEN) { drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_2; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK; break; } /* FALLTHRU */ case MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_ACK: /* 0xd0 */ case MV64XXX_I2C_STATUS_MAST_WR_ACK: /* 0x28 */ if ((drv_data->bytes_left == 0) || (drv_data->aborting && (drv_data->byte_posn != 0))) { drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; drv_data->state = MV64XXX_I2C_STATE_IDLE; } else { drv_data->action = MV64XXX_I2C_ACTION_SEND_DATA; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK; drv_data->bytes_left--; } break; /* Performing a read */ case MV64XXX_I2C_STATUS_MAST_RD_ADDR_ACK: /* 40 */ if (drv_data->msg->flags & I2C_M_TEN) { drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_2; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK; break; } /* FALLTHRU */ case MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_ACK: /* 0xe0 */ if (drv_data->bytes_left == 0) { drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; drv_data->state = MV64XXX_I2C_STATE_IDLE; break; } /* FALLTHRU */ case MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK: /* 0x50 */ if (status != MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK) drv_data->action = MV64XXX_I2C_ACTION_CONTINUE; else { drv_data->action = MV64XXX_I2C_ACTION_RCV_DATA; drv_data->bytes_left--; } drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_DATA; if ((drv_data->bytes_left == 1) || drv_data->aborting) drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_ACK; break; case MV64XXX_I2C_STATUS_MAST_RD_DATA_NO_ACK: /* 0x58 */ drv_data->action = MV64XXX_I2C_ACTION_RCV_DATA_STOP; drv_data->state = MV64XXX_I2C_STATE_IDLE; break; case MV64XXX_I2C_STATUS_MAST_WR_ADDR_NO_ACK: /* 0x20 */ case MV64XXX_I2C_STATUS_MAST_WR_NO_ACK: /* 30 */ case MV64XXX_I2C_STATUS_MAST_RD_ADDR_NO_ACK: /* 48 */ /* Doesn't seem to be a device at other end */ drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; drv_data->state = MV64XXX_I2C_STATE_IDLE; drv_data->rc = -ENODEV; break; default: dev_err(&drv_data->adapter.dev, "mv64xxx_i2c_fsm: Ctlr Error -- state: 0x%x, " "status: 0x%x, addr: 0x%x, flags: 0x%x\n", drv_data->state, status, drv_data->msg->addr, drv_data->msg->flags); drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; mv64xxx_i2c_hw_init(drv_data); drv_data->rc = -EIO; } } static void mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) { switch(drv_data->action) { case MV64XXX_I2C_ACTION_CONTINUE: writel(drv_data->cntl_bits, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); break; case MV64XXX_I2C_ACTION_SEND_START: writel(drv_data->cntl_bits | 
MV64XXX_I2C_REG_CONTROL_START, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); break; case MV64XXX_I2C_ACTION_SEND_ADDR_1: writel(drv_data->addr1, drv_data->reg_base + MV64XXX_I2C_REG_DATA); writel(drv_data->cntl_bits, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); break; case MV64XXX_I2C_ACTION_SEND_ADDR_2: writel(drv_data->addr2, drv_data->reg_base + MV64XXX_I2C_REG_DATA); writel(drv_data->cntl_bits, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); break; case MV64XXX_I2C_ACTION_SEND_DATA: writel(drv_data->msg->buf[drv_data->byte_posn++], drv_data->reg_base + MV64XXX_I2C_REG_DATA); writel(drv_data->cntl_bits, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); break; case MV64XXX_I2C_ACTION_RCV_DATA: drv_data->msg->buf[drv_data->byte_posn++] = readl(drv_data->reg_base + MV64XXX_I2C_REG_DATA); writel(drv_data->cntl_bits, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); break; case MV64XXX_I2C_ACTION_RCV_DATA_STOP: drv_data->msg->buf[drv_data->byte_posn++] = readl(drv_data->reg_base + MV64XXX_I2C_REG_DATA); drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN; writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; wake_up_interruptible(&drv_data->waitq); break; case MV64XXX_I2C_ACTION_INVALID: default: dev_err(&drv_data->adapter.dev, "mv64xxx_i2c_do_action: Invalid action: %d\n", drv_data->action); drv_data->rc = -EIO; /* FALLTHRU */ case MV64XXX_I2C_ACTION_SEND_STOP: drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN; writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; wake_up_interruptible(&drv_data->waitq); break; } } static irqreturn_t mv64xxx_i2c_intr(int irq, void *dev_id) { struct mv64xxx_i2c_data *drv_data = dev_id; unsigned long flags; u32 status; irqreturn_t rc = IRQ_NONE; spin_lock_irqsave(&drv_data->lock, flags); while (readl(drv_data->reg_base + MV64XXX_I2C_REG_CONTROL) & MV64XXX_I2C_REG_CONTROL_IFLG) { status = readl(drv_data->reg_base + MV64XXX_I2C_REG_STATUS); mv64xxx_i2c_fsm(drv_data, status); mv64xxx_i2c_do_action(drv_data); rc = IRQ_HANDLED; } spin_unlock_irqrestore(&drv_data->lock, flags); return rc; } /* ***************************************************************************** * * I2C Msg Execution Routines * ***************************************************************************** */ static void mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg) { u32 dir = 0; drv_data->msg = msg; drv_data->byte_posn = 0; drv_data->bytes_left = msg->len; drv_data->aborting = 0; drv_data->rc = 0; drv_data->cntl_bits = MV64XXX_I2C_REG_CONTROL_ACK | MV64XXX_I2C_REG_CONTROL_INTEN | MV64XXX_I2C_REG_CONTROL_TWSIEN; if (msg->flags & I2C_M_RD) dir = 1; if (msg->flags & I2C_M_REV_DIR_ADDR) dir ^= 1; if (msg->flags & I2C_M_TEN) { drv_data->addr1 = 0xf0 | (((u32)msg->addr & 0x300) >> 7) | dir; drv_data->addr2 = (u32)msg->addr & 0xff; } else { drv_data->addr1 = ((u32)msg->addr & 0x7f) << 1 | dir; drv_data->addr2 = 0; } } static void mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data) { long time_left; unsigned long flags; char abort = 0; time_left = wait_event_interruptible_timeout(drv_data->waitq, !drv_data->block, drv_data->adapter.timeout); spin_lock_irqsave(&drv_data->lock, flags); if (!time_left) { /* Timed out */ drv_data->rc = -ETIMEDOUT; abort = 1; } else if (time_left < 0) { /* Interrupted/Error */ drv_data->rc = time_left; /* errno value */ abort = 1; } if (abort && drv_data->block) { 
drv_data->aborting = 1; spin_unlock_irqrestore(&drv_data->lock, flags); time_left = wait_event_timeout(drv_data->waitq, !drv_data->block, drv_data->adapter.timeout); if ((time_left <= 0) && drv_data->block) { drv_data->state = MV64XXX_I2C_STATE_IDLE; dev_err(&drv_data->adapter.dev, "mv64xxx: I2C bus locked, block: %d, " "time_left: %d\n", drv_data->block, (int)time_left); mv64xxx_i2c_hw_init(drv_data); } } else spin_unlock_irqrestore(&drv_data->lock, flags); } static int mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg) { unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); mv64xxx_i2c_prepare_for_io(drv_data, msg); if (unlikely(msg->flags & I2C_M_NOSTART)) { /* Skip start/addr phases */ if (drv_data->msg->flags & I2C_M_RD) { /* No action to do, wait for slave to send a byte */ drv_data->action = MV64XXX_I2C_ACTION_CONTINUE; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_DATA; } else { drv_data->action = MV64XXX_I2C_ACTION_SEND_DATA; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK; drv_data->bytes_left--; } } else { drv_data->action = MV64XXX_I2C_ACTION_SEND_START; drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND; } drv_data->block = 1; mv64xxx_i2c_do_action(drv_data); spin_unlock_irqrestore(&drv_data->lock, flags); mv64xxx_i2c_wait_for_completion(drv_data); return drv_data->rc; } /* ***************************************************************************** * * I2C Core Support Routines (Interface to higher level I2C code) * ***************************************************************************** */ static u32 mv64xxx_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | I2C_FUNC_SMBUS_EMUL; } static int mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap); int i, rc; for (i=0; i<num; i++) if ((rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i])) < 0) return rc; return num; } static const struct i2c_algorithm mv64xxx_i2c_algo = { .master_xfer = mv64xxx_i2c_xfer, .functionality = mv64xxx_i2c_functionality, }; /* ***************************************************************************** * * Driver Interface & Early Init Routines * ***************************************************************************** */ static int __devinit mv64xxx_i2c_map_regs(struct platform_device *pd, struct mv64xxx_i2c_data *drv_data) { int size; struct resource *r = platform_get_resource(pd, IORESOURCE_MEM, 0); if (!r) return -ENODEV; size = resource_size(r); if (!request_mem_region(r->start, size, drv_data->adapter.name)) return -EBUSY; drv_data->reg_base = ioremap(r->start, size); drv_data->reg_base_p = r->start; drv_data->reg_size = size; return 0; } static void mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data) { if (drv_data->reg_base) { iounmap(drv_data->reg_base); release_mem_region(drv_data->reg_base_p, drv_data->reg_size); } drv_data->reg_base = NULL; drv_data->reg_base_p = 0; } static int __devinit mv64xxx_i2c_probe(struct platform_device *pd) { struct mv64xxx_i2c_data *drv_data; struct mv64xxx_i2c_pdata *pdata = pd->dev.platform_data; int rc; if ((pd->id != 0) || !pdata) return -ENODEV; drv_data = kzalloc(sizeof(struct mv64xxx_i2c_data), GFP_KERNEL); if (!drv_data) return -ENOMEM; if (mv64xxx_i2c_map_regs(pd, drv_data)) { rc = -ENODEV; goto exit_kfree; } strlcpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter", sizeof(drv_data->adapter.name)); init_waitqueue_head(&drv_data->waitq); 
spin_lock_init(&drv_data->lock); drv_data->freq_m = pdata->freq_m; drv_data->freq_n = pdata->freq_n; drv_data->irq = platform_get_irq(pd, 0); if (drv_data->irq < 0) { rc = -ENXIO; goto exit_unmap_regs; } drv_data->adapter.dev.parent = &pd->dev; drv_data->adapter.algo = &mv64xxx_i2c_algo; drv_data->adapter.owner = THIS_MODULE; drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout); drv_data->adapter.nr = pd->id; platform_set_drvdata(pd, drv_data); i2c_set_adapdata(&drv_data->adapter, drv_data); mv64xxx_i2c_hw_init(drv_data); if (request_irq(drv_data->irq, mv64xxx_i2c_intr, 0, MV64XXX_I2C_CTLR_NAME, drv_data)) { dev_err(&drv_data->adapter.dev, "mv64xxx: Can't register intr handler irq: %d\n", drv_data->irq); rc = -EINVAL; goto exit_unmap_regs; } else if ((rc = i2c_add_numbered_adapter(&drv_data->adapter)) != 0) { dev_err(&drv_data->adapter.dev, "mv64xxx: Can't add i2c adapter, rc: %d\n", -rc); goto exit_free_irq; } return 0; exit_free_irq: free_irq(drv_data->irq, drv_data); exit_unmap_regs: mv64xxx_i2c_unmap_regs(drv_data); exit_kfree: kfree(drv_data); return rc; } static int __devexit mv64xxx_i2c_remove(struct platform_device *dev) { struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(dev); int rc; rc = i2c_del_adapter(&drv_data->adapter); free_irq(drv_data->irq, drv_data); mv64xxx_i2c_unmap_regs(drv_data); kfree(drv_data); return rc; } static struct platform_driver mv64xxx_i2c_driver = { .probe = mv64xxx_i2c_probe, .remove = __devexit_p(mv64xxx_i2c_remove), .driver = { .owner = THIS_MODULE, .name = MV64XXX_I2C_CTLR_NAME, }, }; static int __init mv64xxx_i2c_init(void) { return platform_driver_register(&mv64xxx_i2c_driver); } static void __exit mv64xxx_i2c_exit(void) { platform_driver_unregister(&mv64xxx_i2c_driver); } module_init(mv64xxx_i2c_init); module_exit(mv64xxx_i2c_exit); MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>"); MODULE_DESCRIPTION("Marvell mv64xxx host bridge i2c ctlr driver"); MODULE_LICENSE("GPL");
gpl-2.0
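One detail worth pulling out of mv64xxx_i2c_prepare_for_io() above is how the controller's two address registers are filled for 7-bit versus 10-bit targets. The sketch below restates that encoding as a standalone C function so it can be checked outside the driver; the I2C_M_RD/I2C_M_TEN flag values match <linux/i2c.h>, the struct and helper names are invented for the example, and the driver's extra I2C_M_REV_DIR_ADDR handling is omitted for brevity.

/* Standalone restatement of the addr1/addr2 encoding done in
 * mv64xxx_i2c_prepare_for_io() (I2C_M_RD sets the direction bit,
 * I2C_M_TEN selects 10-bit addressing).
 */
#include <stdio.h>
#include <stdint.h>

#define I2C_M_RD   0x0001
#define I2C_M_TEN  0x0010

struct i2c_addr_bytes {
    uint32_t addr1;
    uint32_t addr2;
};

static struct i2c_addr_bytes encode_addr(uint16_t addr, uint16_t flags)
{
    struct i2c_addr_bytes out;
    uint32_t dir = (flags & I2C_M_RD) ? 1 : 0;

    if (flags & I2C_M_TEN) {
        /* 10-bit: 11110xx + R/W in the first byte, low 8 bits in the second */
        out.addr1 = 0xf0 | (((uint32_t)addr & 0x300) >> 7) | dir;
        out.addr2 = (uint32_t)addr & 0xff;
    } else {
        /* 7-bit: address in bits 7..1, R/W in bit 0, no second byte */
        out.addr1 = (((uint32_t)addr & 0x7f) << 1) | dir;
        out.addr2 = 0;
    }
    return out;
}

int main(void)
{
    struct i2c_addr_bytes a = encode_addr(0x50, 0);                     /* 7-bit write  */
    struct i2c_addr_bytes b = encode_addr(0x2a5, I2C_M_TEN | I2C_M_RD); /* 10-bit read  */

    printf("7-bit  0x50  write: addr1=0x%02x\n", a.addr1);
    printf("10-bit 0x2a5 read : addr1=0x%02x addr2=0x%02x\n", b.addr1, b.addr2);
    return 0;
}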
SoraBetty/mptcp
arch/parisc/kernel/signal.c
504
17196
/* * linux/arch/parisc/kernel/signal.c: Architecture-specific signal * handling support. * * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org> * Copyright (C) 2000 Linuxcare, Inc. * * Based on the ia64, i386, and alpha versions. * * Like the IA-64, we are a recent enough port (we are *starting* * with glibc2.2) that we do not need to support the old non-realtime * Linux signals. Therefore we don't. HP/UX signals will go in * arch/parisc/hpux/signal.c when we figure out how to do them. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/tracehook.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/compat.h> #include <linux/elf.h> #include <asm/ucontext.h> #include <asm/rt_sigframe.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> #include <asm/cacheflush.h> #include <asm/asm-offsets.h> #ifdef CONFIG_COMPAT #include "signal32.h" #endif #define DEBUG_SIG 0 #define DEBUG_SIG_LEVEL 2 #if DEBUG_SIG #define DBG(LEVEL, ...) \ ((DEBUG_SIG_LEVEL >= LEVEL) \ ? printk(__VA_ARGS__) : (void) 0) #else #define DBG(LEVEL, ...) #endif /* gcc will complain if a pointer is cast to an integer of different * size. If you really need to do this (and we do for an ELF32 user * application in an ELF64 kernel) then you have to do a cast to an * integer of the same size first. The A() macro accomplishes * this. */ #define A(__x) ((unsigned long)(__x)) /* * Do a signal return - restore sigcontext. */ /* Trampoline for calling rt_sigreturn() */ #define INSN_LDI_R25_0 0x34190000 /* ldi 0,%r25 (in_syscall=0) */ #define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */ #define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */ #define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */ #define INSN_NOP 0x08000240 /* nop */ /* For debugging */ #define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */ static long restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { long err = 0; err |= __copy_from_user(regs->gr, sc->sc_gr, sizeof(regs->gr)); err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr)); err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq)); err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq)); err |= __get_user(regs->sar, &sc->sc_sar); DBG(2,"restore_sigcontext: iaoq is %#lx / %#lx\n", regs->iaoq[0],regs->iaoq[1]); DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]); return err; } void sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) { struct rt_sigframe __user *frame; sigset_t set; unsigned long usp = (regs->gr[30] & ~(0x01UL)); unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE; #ifdef CONFIG_64BIT compat_sigset_t compat_set; struct compat_rt_sigframe __user * compat_frame; if (is_compat_task()) sigframe_size = PARISC_RT_SIGFRAME_SIZE32; #endif current_thread_info()->restart_block.fn = do_no_restart_syscall; /* Unwind the user stack to get the rt_sigframe structure. 
*/ frame = (struct rt_sigframe __user *) (usp - sigframe_size); DBG(2,"sys_rt_sigreturn: frame is %p\n", frame); regs->orig_r28 = 1; /* no restarts for sigreturn */ #ifdef CONFIG_64BIT compat_frame = (struct compat_rt_sigframe __user *)frame; if (is_compat_task()) { DBG(2,"sys_rt_sigreturn: ELF32 process.\n"); if (__copy_from_user(&compat_set, &compat_frame->uc.uc_sigmask, sizeof(compat_set))) goto give_sigsegv; sigset_32to64(&set,&compat_set); } else #endif { if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto give_sigsegv; } set_current_blocked(&set); /* Good thing we saved the old gr[30], eh? */ #ifdef CONFIG_64BIT if (is_compat_task()) { DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n", &compat_frame->uc.uc_mcontext); // FIXME: Load upper half from register file if (restore_sigcontext32(&compat_frame->uc.uc_mcontext, &compat_frame->regs, regs)) goto give_sigsegv; DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n", usp, &compat_frame->uc.uc_stack); if (compat_restore_altstack(&compat_frame->uc.uc_stack)) goto give_sigsegv; } else #endif { DBG(1,"sys_rt_sigreturn: frame->uc.uc_mcontext 0x%p\n", &frame->uc.uc_mcontext); if (restore_sigcontext(&frame->uc.uc_mcontext, regs)) goto give_sigsegv; DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n", usp, &frame->uc.uc_stack); if (restore_altstack(&frame->uc.uc_stack)) goto give_sigsegv; } /* If we are on the syscall path IAOQ will not be restored, and * if we are on the interrupt path we must not corrupt gr31. */ if (in_syscall) regs->gr[31] = regs->iaoq[0]; #if DEBUG_SIG DBG(1,"sys_rt_sigreturn: returning to %#lx, DUMPING REGS:\n", regs->iaoq[0]); show_regs(regs); #endif return; give_sigsegv: DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n"); force_sig(SIGSEGV, current); return; } /* * Set up a signal frame. */ static inline void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) { /*FIXME: ELF32 vs. ELF64 has different frame_size, but since we don't use the parameter it doesn't matter */ DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n", (unsigned long)ka, sp, frame_size); /* Align alternate stack and reserve 64 bytes for the signal handler's frame marker. */ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */ DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp); return (void __user *) sp; /* Stacks grow up. Fun. 
*/ } static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall) { unsigned long flags = 0; long err = 0; if (on_sig_stack((unsigned long) sc)) flags |= PARISC_SC_FLAG_ONSTACK; if (in_syscall) { flags |= PARISC_SC_FLAG_IN_SYSCALL; /* regs->iaoq is undefined in the syscall return path */ err |= __put_user(regs->gr[31], &sc->sc_iaoq[0]); err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]); err |= __put_user(regs->sr[3], &sc->sc_iasq[0]); err |= __put_user(regs->sr[3], &sc->sc_iasq[1]); DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (in syscall)\n", regs->gr[31], regs->gr[31]+4); } else { err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq)); err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq)); DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (not in syscall)\n", regs->iaoq[0], regs->iaoq[1]); } err |= __put_user(flags, &sc->sc_flags); err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr)); err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr)); err |= __put_user(regs->sar, &sc->sc_sar); DBG(1,"setup_sigcontext: r28 is %ld\n", regs->gr[28]); return err; } static long setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs, int in_syscall) { struct rt_sigframe __user *frame; unsigned long rp, usp; unsigned long haddr, sigframe_size; int err = 0; #ifdef CONFIG_64BIT struct compat_rt_sigframe __user * compat_frame; compat_sigset_t compat_set; #endif usp = (regs->gr[30] & ~(0x01UL)); /*FIXME: frame_size parameter is unused, remove it. */ frame = get_sigframe(ka, usp, sizeof(*frame)); DBG(1,"SETUP_RT_FRAME: START\n"); DBG(1,"setup_rt_frame: frame %p info %p\n", frame, info); #ifdef CONFIG_64BIT compat_frame = (struct compat_rt_sigframe __user *)frame; if (is_compat_task()) { DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info); err |= copy_siginfo_to_user32(&compat_frame->info, info); err |= __compat_save_altstack( &compat_frame->uc.uc_stack, regs->gr[30]); DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc); DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext); err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext, &compat_frame->regs, regs, in_syscall); sigset_64to32(&compat_set,set); err |= __copy_to_user(&compat_frame->uc.uc_sigmask, &compat_set, sizeof(compat_set)); } else #endif { DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info); err |= copy_siginfo_to_user(&frame->info, info); err |= __save_altstack(&frame->uc.uc_stack, regs->gr[30]); DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc); DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall); /* FIXME: Should probably be converted as well for the compat case */ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); } if (err) goto give_sigsegv; /* Set up to return from userspace. If provided, use a stub already in userspace. The first words of tramp are used to save the previous sigrestartblock trampoline that might be on the stack. We start the sigreturn trampoline at SIGRESTARTBLOCK_TRAMP+X. */ err |= __put_user(in_syscall ? 
INSN_LDI_R25_1 : INSN_LDI_R25_0, &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]); err |= __put_user(INSN_LDI_R20, &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]); err |= __put_user(INSN_BLE_SR2_R0, &frame->tramp[SIGRESTARTBLOCK_TRAMP+2]); err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]); #if DEBUG_SIG /* Assert that we're flushing in the correct space... */ { unsigned long sid; asm ("mfsp %%sr3,%0" : "=r" (sid)); DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n", sid, frame->tramp); } #endif flush_user_dcache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[TRAMP_SIZE]); flush_user_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[TRAMP_SIZE]); /* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP * So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP */ rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP]; if (err) goto give_sigsegv; haddr = A(ka->sa.sa_handler); /* The sa_handler may be a pointer to a function descriptor */ #ifdef CONFIG_64BIT if (is_compat_task()) { #endif if (haddr & PA_PLABEL_FDESC) { Elf32_Fdesc fdesc; Elf32_Fdesc __user *ufdesc = (Elf32_Fdesc __user *)A(haddr & ~3); err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc)); if (err) goto give_sigsegv; haddr = fdesc.addr; regs->gr[19] = fdesc.gp; } #ifdef CONFIG_64BIT } else { Elf64_Fdesc fdesc; Elf64_Fdesc __user *ufdesc = (Elf64_Fdesc __user *)A(haddr & ~3); err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc)); if (err) goto give_sigsegv; haddr = fdesc.addr; regs->gr[19] = fdesc.gp; DBG(1,"setup_rt_frame: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n", haddr, regs->gr[19], in_syscall); } #endif /* The syscall return path will create IAOQ values from r31. */ sigframe_size = PARISC_RT_SIGFRAME_SIZE; #ifdef CONFIG_64BIT if (is_compat_task()) sigframe_size = PARISC_RT_SIGFRAME_SIZE32; #endif if (in_syscall) { regs->gr[31] = haddr; #ifdef CONFIG_64BIT if (!test_thread_flag(TIF_32BIT)) sigframe_size |= 1; #endif } else { unsigned long psw = USER_PSW; #ifdef CONFIG_64BIT if (!test_thread_flag(TIF_32BIT)) psw |= PSW_W; #endif /* If we are singlestepping, arrange a trap to be delivered when we return to userspace. Note the semantics -- we should trap before the first insn in the handler is executed. Ref: http://sources.redhat.com/ml/gdb/2004-11/msg00245.html */ if (pa_psw(current)->r) { pa_psw(current)->r = 0; psw |= PSW_R; mtctl(-1, 0); } regs->gr[0] = psw; regs->iaoq[0] = haddr | 3; regs->iaoq[1] = regs->iaoq[0] + 4; } regs->gr[2] = rp; /* userland return pointer */ regs->gr[26] = sig; /* signal number */ #ifdef CONFIG_64BIT if (is_compat_task()) { regs->gr[25] = A(&compat_frame->info); /* siginfo pointer */ regs->gr[24] = A(&compat_frame->uc); /* ucontext pointer */ } else #endif { regs->gr[25] = A(&frame->info); /* siginfo pointer */ regs->gr[24] = A(&frame->uc); /* ucontext pointer */ } DBG(1,"setup_rt_frame: making sigreturn frame: %#lx + %#lx = %#lx\n", regs->gr[30], sigframe_size, regs->gr[30] + sigframe_size); /* Raise the user stack pointer to make a proper call frame. */ regs->gr[30] = (A(frame) + sigframe_size); DBG(1,"setup_rt_frame: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n", current->comm, current->pid, frame, regs->gr[30], regs->iaoq[0], regs->iaoq[1], rp); return 1; give_sigsegv: DBG(1,"setup_rt_frame: sending SIGSEGV\n"); force_sigsegv(sig, current); return 0; } /* * OK, we're invoking a handler. 
*/ static void handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int in_syscall) { sigset_t *oldset = sigmask_to_save(); DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n", sig, ka, info, oldset, regs); /* Set up the stack frame */ if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall)) return; signal_delivered(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP) || test_thread_flag(TIF_BLOCKSTEP)); DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n", regs->gr[28]); } static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) { if (regs->orig_r28) return; regs->orig_r28 = 1; /* no more restarts */ /* Check the return code */ switch (regs->gr[28]) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: DBG(1,"ERESTARTNOHAND: returning -EINTR\n"); regs->gr[28] = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { DBG(1,"ERESTARTSYS: putting -EINTR\n"); regs->gr[28] = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: /* A syscall is just a branch, so all * we have to do is fiddle the return pointer. */ regs->gr[31] -= 8; /* delayed branching */ break; } } static inline void insert_restart_trampoline(struct pt_regs *regs) { if (regs->orig_r28) return; regs->orig_r28 = 1; /* no more restarts */ switch(regs->gr[28]) { case -ERESTART_RESTARTBLOCK: { /* Restart the system call - no handlers present */ unsigned int *usp = (unsigned int *)regs->gr[30]; /* Setup a trampoline to restart the syscall * with __NR_restart_syscall * * 0: <return address (orig r31)> * 4: <2nd half for 64-bit> * 8: ldw 0(%sp), %r31 * 12: be 0x100(%sr2, %r0) * 16: ldi __NR_restart_syscall, %r20 */ #ifdef CONFIG_64BIT put_user(regs->gr[31] >> 32, &usp[0]); put_user(regs->gr[31] & 0xffffffff, &usp[1]); put_user(0x0fc010df, &usp[2]); #else put_user(regs->gr[31], &usp[0]); put_user(0x0fc0109f, &usp[2]); #endif put_user(0xe0008200, &usp[3]); put_user(0x34140000, &usp[4]); /* Stack is 64-byte aligned, and we only need * to flush 1 cache line. * Flushing one cacheline is cheap. * "sync" on bigger (> 4 way) boxes is not. */ flush_user_dcache_range(regs->gr[30], regs->gr[30] + 4); flush_user_icache_range(regs->gr[30], regs->gr[30] + 4); regs->gr[31] = regs->gr[30] + 8; return; } case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: { /* Hooray for delayed branching. We don't * have to restore %r20 (the system call * number) because it gets loaded in the delay * slot of the branch external instruction. */ regs->gr[31] -= 8; return; } default: break; } } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * We need to be able to restore the syscall arguments (r21-r26) to * restart syscalls. Thus, the syscall path should save them in the * pt_regs structure (it's okay to do so since they are caller-save * registers). As noted below, the syscall number gets restored for * us due to the magic of delayed branching. */ asmlinkage void do_signal(struct pt_regs *regs, long in_syscall) { siginfo_t info; struct k_sigaction ka; int signr; DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n", regs, regs->sr[7], in_syscall); signr = get_signal_to_deliver(&info, &ka, regs, NULL); DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); if (signr > 0) { /* Restart a system call if necessary. 
*/ if (in_syscall) syscall_restart(regs, &ka); handle_signal(signr, &info, &ka, regs, in_syscall); return; } /* Did we come from a system call? */ if (in_syscall) insert_restart_trampoline(regs); DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n", regs->gr[28]); restore_saved_sigmask(); } void do_notify_resume(struct pt_regs *regs, long in_syscall) { if (test_thread_flag(TIF_SIGPENDING)) do_signal(regs, in_syscall); if (test_thread_flag(TIF_NOTIFY_RESUME)) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } }
gpl-2.0
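The syscall_restart() helper in the PA-RISC signal code above decides, per delivered signal, whether an interrupted syscall reports -EINTR or is set up to restart by rewinding gr[31] for the delayed branch. The sketch below lifts that decision table into a standalone function; the ERESTART* numbers are kernel-internal errno values reproduced only so the example compiles, and the enum and its names are invented for illustration.

/* Standalone restatement of the decision made by syscall_restart().
 * The ERESTART* values are kernel-internal constants, copied here as an
 * assumption so the example is self-contained.
 */
#include <stdio.h>
#include <stdbool.h>

#define ERESTARTSYS           512
#define ERESTARTNOINTR        513
#define ERESTARTNOHAND        514
#define ERESTART_RESTARTBLOCK 516

enum restart_action {
    ACTION_RETURN_EINTR,     /* hand -EINTR back to userspace           */
    ACTION_RESTART_SYSCALL,  /* rewind the return pointer (gr[31] -= 8) */
    ACTION_NONE,             /* not a restart code, leave it alone      */
};

static enum restart_action classify(long retval, bool sa_restart)
{
    switch (retval) {
    case -ERESTART_RESTARTBLOCK:
    case -ERESTARTNOHAND:
        return ACTION_RETURN_EINTR;
    case -ERESTARTSYS:
        if (!sa_restart)
            return ACTION_RETURN_EINTR;
        /* fallthrough: SA_RESTART set, treat like ERESTARTNOINTR */
    case -ERESTARTNOINTR:
        return ACTION_RESTART_SYSCALL;
    default:
        return ACTION_NONE;
    }
}

int main(void)
{
    printf("ERESTARTSYS, no SA_RESTART  -> %d\n", classify(-ERESTARTSYS, false));
    printf("ERESTARTSYS, SA_RESTART set -> %d\n", classify(-ERESTARTSYS, true));
    printf("ERESTARTNOHAND              -> %d\n", classify(-ERESTARTNOHAND, false));
    return 0;
}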
TheTypoMaster/Fujitsu-Siemens-ESPRIMO-Mobile-V5535
drivers/media/usb/cx231xx/cx231xx-cards.c
504
47732
/* cx231xx-cards.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> Based on em28xx driver This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "cx231xx.h" #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/i2c.h> #include <media/tuner.h> #include <media/tveeprom.h> #include <media/v4l2-common.h> #include <media/cx25840.h> #include "dvb-usb-ids.h" #include "xc5000.h" #include "tda18271.h" static int tuner = -1; module_param(tuner, int, 0444); MODULE_PARM_DESC(tuner, "tuner type"); static int transfer_mode = 1; module_param(transfer_mode, int, 0444); MODULE_PARM_DESC(transfer_mode, "transfer mode (1-ISO or 0-BULK)"); static unsigned int disable_ir; module_param(disable_ir, int, 0444); MODULE_PARM_DESC(disable_ir, "disable infrared remote support"); /* Bitmask marking allocated devices from 0 to CX231XX_MAXBOARDS */ static unsigned long cx231xx_devused; /* * Reset sequences for analog/digital modes */ static struct cx231xx_reg_seq RDE250_XCV_TUNER[] = { {0x03, 0x01, 10}, {0x03, 0x00, 30}, {0x03, 0x01, 10}, {-1, -1, -1}, }; /* * Board definitions */ struct cx231xx_board cx231xx_boards[] = { [CX231XX_BOARD_UNKNOWN] = { .name = "Unknown CX231xx video grabber", .tuner_type = TUNER_ABSENT, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_CARRAERA] = { .name = "Conexant Hybrid TV - CARRAERA", .tuner_type = TUNER_XC5000, .tuner_addr = 0x61, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x02, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_SHELBY] = { .name = "Conexant Hybrid TV - SHELBY", .tuner_type = TUNER_XC5000, .tuner_addr = 0x61, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, 
.ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x32, .norm = V4L2_STD_NTSC, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_RDE_253S] = { .name = "Conexant Hybrid TV - RDE253S", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x1c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x02, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_RDU_253S] = { .name = "Conexant Hybrid TV - RDU253S", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x1c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x02, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_VIDEO_GRABBER] = { .name = "Conexant VIDEO GRABBER", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x1c, .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_PAL, .no_alt_vanc = 1, .external_av = 1, /* Actually, it has a 417, but it isn't working correctly. * So set to 0 for now until someone can manage to get this * to work reliably. 
*/ .has_417 = 0, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_RDE_250] = { .name = "Conexant Hybrid TV - rde 250", .tuner_type = TUNER_XC5000, .tuner_addr = 0x61, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x02, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, } }, }, [CX231XX_BOARD_CNXT_RDU_250] = { .name = "Conexant Hybrid TV - RDU 250", .tuner_type = TUNER_XC5000, .tuner_addr = 0x61, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x32, .norm = V4L2_STD_NTSC, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_EXETER] = { .name = "Hauppauge EXETER", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_1, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_NTSC, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_USBLIVE2] = { .name = "Hauppauge USB Live 2", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_NTSC, .no_alt_vanc = 1, .external_av = 1, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_KWORLD_UB430_USB_HYBRID] = { .name = "Kworld UB430 USB Hybrid", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x11, /* According with PV cxPolaris.inf file */ .tuner_sif_gpio = -1, .tuner_scl_gpio = -1, .tuner_sda_gpio = -1, .gpio_pin_status_mask = 
0x4001000, .tuner_i2c_master = I2C_2, .demod_i2c_master = I2C_1_MUX_3, .ir_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x10, .norm = V4L2_STD_PAL_M, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_KWORLD_UB445_USB_HYBRID] = { .name = "Kworld UB445 USB Hybrid", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x11, /* According with PV cxPolaris.inf file */ .tuner_sif_gpio = -1, .tuner_scl_gpio = -1, .tuner_sda_gpio = -1, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_2, .demod_i2c_master = I2C_1_MUX_3, .ir_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x10, .norm = V4L2_STD_NTSC_M, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_PV_PLAYTV_USB_HYBRID] = { .name = "Pixelview PlayTV USB Hybrid", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */ .tuner_sif_gpio = -1, .tuner_scl_gpio = -1, .tuner_sda_gpio = -1, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_2, .demod_i2c_master = I2C_1_MUX_3, .ir_i2c_master = I2C_2, .rc_map_name = RC_MAP_PIXELVIEW_002T, .has_dvb = 1, .demod_addr = 0x10, .norm = V4L2_STD_PAL_M, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_PV_XCAPTURE_USB] = { .name = "Pixelview Xcapture USB", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_NTSC, .no_alt_vanc = 1, .external_av = 1, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_ICONBIT_U100] = { .name = "Iconbit Analog Stick U100 FM", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x1C, .gpio_pin_status_mask = 0x4001000, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = 
CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL] = { .name = "Hauppauge WinTV USB2 FM (PAL)", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC] = { .name = "Hauppauge WinTV USB2 FM (NTSC)", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .norm = V4L2_STD_NTSC, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2] = { .name = "Elgato Video Capture V2", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_NTSC, .no_alt_vanc = 1, .external_av = 1, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_OTG102] = { .name = "Geniatech OTG102", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, /* According with PV CxPlrCAP.inf file */ .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_NTSC, .no_alt_vanc = 1, .external_av = 1, /*.has_417 = 1, */ /* This board is believed to have a hardware encoding chip * supporting mpeg1/2/4, but as the 417 is apparently not * working for the reference board it is not here either. 
*/ .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx] = { .name = "Hauppauge WinTV 930C-HD (1113xx) / HVR-900H (111xxx) / PCTV QuatroStick 521e", .tuner_type = TUNER_NXP_TDA18271, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx] = { .name = "Hauppauge WinTV 930C-HD (1114xx) / HVR-901H (1114xx) / PCTV QuatroStick 522e", .tuner_type = TUNER_ABSENT, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_PAL, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_HAUPPAUGE_955Q] = { .name = "Hauppauge WinTV-HVR-955Q (111401)", .tuner_type = TUNER_ABSENT, .tuner_addr = 0x60, .tuner_gpio = RDE250_XCV_TUNER, .tuner_sif_gpio = 0x05, .tuner_scl_gpio = 0x1a, .tuner_sda_gpio = 0x1b, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .tuner_i2c_master = I2C_1_MUX_3, .demod_i2c_master = I2C_2, .has_dvb = 1, .demod_addr = 0x0e, .norm = V4L2_STD_NTSC, .input = {{ .type = CX231XX_VMUX_TELEVISION, .vmux = CX231XX_VIN_3_1, .amux = CX231XX_AMUX_VIDEO, .gpio = NULL, }, { .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, [CX231XX_BOARD_TERRATEC_GRABBY] = { .name = "Terratec Grabby", .tuner_type = TUNER_ABSENT, .decoder = CX231XX_AVDECODER, .output_mode = OUT_MODE_VIP11, .demod_xfer_mode = 0, .ctl_pin_status_mask = 0xFFFFFFC4, .agc_analog_digital_select_gpio = 0x0c, .gpio_pin_status_mask = 0x4001000, .norm = V4L2_STD_PAL, .no_alt_vanc = 1, .external_av = 1, .input = {{ .type = CX231XX_VMUX_COMPOSITE1, .vmux = CX231XX_VIN_2_1, .amux = 
CX231XX_AMUX_LINE_IN, .gpio = NULL, }, { .type = CX231XX_VMUX_SVIDEO, .vmux = CX231XX_VIN_1_1 | (CX231XX_VIN_1_2 << 8) | CX25840_SVIDEO_ON, .amux = CX231XX_AMUX_LINE_IN, .gpio = NULL, } }, }, }; const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards); /* table of devices that work with this driver */ struct usb_device_id cx231xx_id_table[] = { {USB_DEVICE(0x1D19, 0x6109), .driver_info = CX231XX_BOARD_PV_XCAPTURE_USB}, {USB_DEVICE(0x0572, 0x5A3C), .driver_info = CX231XX_BOARD_UNKNOWN}, {USB_DEVICE(0x0572, 0x58A2), .driver_info = CX231XX_BOARD_CNXT_CARRAERA}, {USB_DEVICE(0x0572, 0x58A1), .driver_info = CX231XX_BOARD_CNXT_SHELBY}, {USB_DEVICE(0x0572, 0x58A4), .driver_info = CX231XX_BOARD_CNXT_RDE_253S}, {USB_DEVICE(0x0572, 0x58A5), .driver_info = CX231XX_BOARD_CNXT_RDU_253S}, {USB_DEVICE(0x0572, 0x58A6), .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER}, {USB_DEVICE(0x0572, 0x589E), .driver_info = CX231XX_BOARD_CNXT_RDE_250}, {USB_DEVICE(0x0572, 0x58A0), .driver_info = CX231XX_BOARD_CNXT_RDU_250}, {USB_DEVICE(0x2040, 0xb110), .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL}, {USB_DEVICE(0x2040, 0xb111), .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC}, {USB_DEVICE(0x2040, 0xb120), .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER}, {USB_DEVICE(0x2040, 0xb123), .driver_info = CX231XX_BOARD_HAUPPAUGE_955Q}, {USB_DEVICE(0x2040, 0xb130), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx}, {USB_DEVICE(0x2040, 0xb131), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx}, /* Hauppauge WinTV-HVR-900-H */ {USB_DEVICE(0x2040, 0xb138), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx}, /* Hauppauge WinTV-HVR-901-H */ {USB_DEVICE(0x2040, 0xb139), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx}, {USB_DEVICE(0x2040, 0xb140), .driver_info = CX231XX_BOARD_HAUPPAUGE_EXETER}, {USB_DEVICE(0x2040, 0xc200), .driver_info = CX231XX_BOARD_HAUPPAUGE_USBLIVE2}, /* PCTV QuatroStick 521e */ {USB_DEVICE(0x2013, 0x0259), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx}, /* PCTV QuatroStick 522e */ {USB_DEVICE(0x2013, 0x025e), .driver_info = CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx}, {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000, 0x4001), .driver_info = CX231XX_BOARD_PV_PLAYTV_USB_HYBRID}, {USB_DEVICE(USB_VID_PIXELVIEW, 0x5014), .driver_info = CX231XX_BOARD_PV_XCAPTURE_USB}, {USB_DEVICE(0x1b80, 0xe424), .driver_info = CX231XX_BOARD_KWORLD_UB430_USB_HYBRID}, {USB_DEVICE(0x1b80, 0xe421), .driver_info = CX231XX_BOARD_KWORLD_UB445_USB_HYBRID}, {USB_DEVICE(0x1f4d, 0x0237), .driver_info = CX231XX_BOARD_ICONBIT_U100}, {USB_DEVICE(0x0fd9, 0x0037), .driver_info = CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2}, {USB_DEVICE(0x1f4d, 0x0102), .driver_info = CX231XX_BOARD_OTG102}, {USB_DEVICE(USB_VID_TERRATEC, 0x00a6), .driver_info = CX231XX_BOARD_TERRATEC_GRABBY}, {}, }; MODULE_DEVICE_TABLE(usb, cx231xx_id_table); /* cx231xx_tuner_callback * will be used to reset XC5000 tuner using GPIO pin */ int cx231xx_tuner_callback(void *ptr, int component, int command, int arg) { int rc = 0; struct cx231xx *dev = ptr; if (dev->tuner_type == TUNER_XC5000) { if (command == XC5000_TUNER_RESET) { dev_dbg(dev->dev, "Tuner CB: RESET: cmd %d : tuner type %d\n", command, dev->tuner_type); cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); msleep(10); cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 0); msleep(330); cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); msleep(10); } } else if (dev->tuner_type == TUNER_NXP_TDA18271) { switch (command) { case 
TDA18271_CALLBACK_CMD_AGC_ENABLE: if (dev->model == CX231XX_BOARD_PV_PLAYTV_USB_HYBRID) rc = cx231xx_set_agc_analog_digital_mux_select(dev, arg); break; default: rc = -EINVAL; break; } } return rc; } EXPORT_SYMBOL_GPL(cx231xx_tuner_callback); static void cx231xx_reset_out(struct cx231xx *dev) { cx231xx_set_gpio_value(dev, CX23417_RESET, 1); msleep(200); cx231xx_set_gpio_value(dev, CX23417_RESET, 0); msleep(200); cx231xx_set_gpio_value(dev, CX23417_RESET, 1); } static void cx231xx_enable_OSC(struct cx231xx *dev) { cx231xx_set_gpio_value(dev, CX23417_OSC_EN, 1); } static void cx231xx_sleep_s5h1432(struct cx231xx *dev) { cx231xx_set_gpio_value(dev, SLEEP_S5H1432, 0); } static inline void cx231xx_set_model(struct cx231xx *dev) { dev->board = cx231xx_boards[dev->model]; } /* Since cx231xx_pre_card_setup() requires a proper dev->model, * this won't work for boards with generic PCI IDs */ void cx231xx_pre_card_setup(struct cx231xx *dev) { dev_info(dev->dev, "Identified as %s (card=%d)\n", dev->board.name, dev->model); /* set the direction for GPIO pins */ if (dev->board.tuner_gpio) { cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1); cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); } if (dev->board.tuner_sif_gpio >= 0) cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1); /* request some modules if any required */ /* set the mode to Analog mode initially */ cx231xx_set_mode(dev, CX231XX_ANALOG_MODE); /* Unlock device */ /* cx231xx_set_mode(dev, CX231XX_SUSPEND); */ } static void cx231xx_config_tuner(struct cx231xx *dev) { struct tuner_setup tun_setup; struct v4l2_frequency f; if (dev->tuner_type == TUNER_ABSENT) return; tun_setup.mode_mask = T_ANALOG_TV | T_RADIO; tun_setup.type = dev->tuner_type; tun_setup.addr = dev->tuner_addr; tun_setup.tuner_callback = cx231xx_tuner_callback; tuner_call(dev, tuner, s_type_addr, &tun_setup); #if 0 if (tun_setup.type == TUNER_XC5000) { static struct xc2028_ctrl ctrl = { .fname = XC5000_DEFAULT_FIRMWARE, .max_len = 64, .demod = 0; }; struct v4l2_priv_tun_config cfg = { .tuner = dev->tuner_type, .priv = &ctrl, }; tuner_call(dev, tuner, s_config, &cfg); } #endif /* configure tuner */ f.tuner = 0; f.type = V4L2_TUNER_ANALOG_TV; f.frequency = 9076; /* just a magic number */ dev->ctl_freq = f.frequency; call_all(dev, tuner, s_frequency, &f); } static int read_eeprom(struct cx231xx *dev, struct i2c_client *client, u8 *eedata, int len) { int ret = 0; u8 start_offset = 0; int len_todo = len; u8 *eedata_cur = eedata; int i; struct i2c_msg msg_write = { .addr = client->addr, .flags = 0, .buf = &start_offset, .len = 1 }; struct i2c_msg msg_read = { .addr = client->addr, .flags = I2C_M_RD }; /* start reading at offset 0 */ ret = i2c_transfer(client->adapter, &msg_write, 1); if (ret < 0) { dev_err(dev->dev, "Can't read eeprom\n"); return ret; } while (len_todo > 0) { msg_read.len = (len_todo > 64) ? 
64 : len_todo; msg_read.buf = eedata_cur; ret = i2c_transfer(client->adapter, &msg_read, 1); if (ret < 0) { dev_err(dev->dev, "Can't read eeprom\n"); return ret; } eedata_cur += msg_read.len; len_todo -= msg_read.len; } for (i = 0; i + 15 < len; i += 16) dev_dbg(dev->dev, "i2c eeprom %02x: %*ph\n", i, 16, &eedata[i]); return 0; } void cx231xx_card_setup(struct cx231xx *dev) { cx231xx_set_model(dev); dev->tuner_type = cx231xx_boards[dev->model].tuner_type; if (cx231xx_boards[dev->model].tuner_addr) dev->tuner_addr = cx231xx_boards[dev->model].tuner_addr; /* request some modules */ if (dev->board.decoder == CX231XX_AVDECODER) { dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, cx231xx_get_i2c_adap(dev, I2C_0), "cx25840", 0x88 >> 1, NULL); if (dev->sd_cx25840 == NULL) dev_err(dev->dev, "cx25840 subdev registration failure\n"); cx25840_call(dev, core, load_fw); } /* Initialize the tuner */ if (dev->board.tuner_type != TUNER_ABSENT) { struct i2c_adapter *tuner_i2c = cx231xx_get_i2c_adap(dev, dev->board.tuner_i2c_master); dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, tuner_i2c, "tuner", dev->tuner_addr, NULL); if (dev->sd_tuner == NULL) dev_err(dev->dev, "tuner subdev registration failure\n"); else cx231xx_config_tuner(dev); } switch (dev->model) { case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx: case CX231XX_BOARD_HAUPPAUGE_955Q: { struct eeprom { struct tveeprom tvee; u8 eeprom[256]; struct i2c_client client; }; struct eeprom *e = kzalloc(sizeof(*e), GFP_KERNEL); if (e == NULL) { dev_err(dev->dev, "failed to allocate memory to read eeprom\n"); break; } e->client.adapter = cx231xx_get_i2c_adap(dev, I2C_1_MUX_1); e->client.addr = 0xa0 >> 1; read_eeprom(dev, &e->client, e->eeprom, sizeof(e->eeprom)); tveeprom_hauppauge_analog(&e->client, &e->tvee, e->eeprom + 0xc0); kfree(e); break; } } } /* * cx231xx_config() * inits registers with sane defaults */ int cx231xx_config(struct cx231xx *dev) { /* TBD need to add cx231xx specific code */ return 0; } /* * cx231xx_config_i2c() * configure i2c attached devices */ void cx231xx_config_i2c(struct cx231xx *dev) { /* u32 input = INPUT(dev->video_input)->vmux; */ call_all(dev, video, s_stream, 1); } static void cx231xx_unregister_media_device(struct cx231xx *dev) { #ifdef CONFIG_MEDIA_CONTROLLER if (dev->media_dev) { media_device_unregister(dev->media_dev); kfree(dev->media_dev); dev->media_dev = NULL; } #endif } /* * cx231xx_realease_resources() * unregisters the v4l2,i2c and usb devices * called when the device gets disconected or at module unload */ void cx231xx_release_resources(struct cx231xx *dev) { cx231xx_unregister_media_device(dev); cx231xx_release_analog_resources(dev); cx231xx_remove_from_devlist(dev); cx231xx_ir_exit(dev); /* Release I2C buses */ cx231xx_dev_uninit(dev); /* delete v4l2 device */ v4l2_device_unregister(&dev->v4l2_dev); usb_put_dev(dev->udev); /* Mark device as unused */ clear_bit(dev->devno, &cx231xx_devused); } static void cx231xx_media_device_register(struct cx231xx *dev, struct usb_device *udev) { #ifdef CONFIG_MEDIA_CONTROLLER struct media_device *mdev; int ret; mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return; mdev->dev = dev->dev; strlcpy(mdev->model, dev->board.name, sizeof(mdev->model)); if (udev->serial) strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial)); strcpy(mdev->bus_info, udev->devpath); mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); mdev->driver_version = LINUX_VERSION_CODE; ret = media_device_register(mdev); if (ret) { 
dev_err(dev->dev, "Couldn't create a media device. Error: %d\n", ret); kfree(mdev); return; } dev->media_dev = mdev; #endif } static void cx231xx_create_media_graph(struct cx231xx *dev) { #ifdef CONFIG_MEDIA_CONTROLLER struct media_device *mdev = dev->media_dev; struct media_entity *entity; struct media_entity *tuner = NULL, *decoder = NULL; if (!mdev) return; media_device_for_each_entity(entity, mdev) { switch (entity->type) { case MEDIA_ENT_T_V4L2_SUBDEV_TUNER: tuner = entity; break; case MEDIA_ENT_T_V4L2_SUBDEV_DECODER: decoder = entity; break; } } /* Analog setup, using tuner as a link */ if (!decoder) return; if (tuner) media_entity_create_link(tuner, 0, decoder, 0, MEDIA_LNK_FL_ENABLED); media_entity_create_link(decoder, 1, &dev->vdev.entity, 0, MEDIA_LNK_FL_ENABLED); media_entity_create_link(decoder, 2, &dev->vbi_dev.entity, 0, MEDIA_LNK_FL_ENABLED); #endif } /* * cx231xx_init_dev() * allocates and inits the device structs, registers i2c bus and v4l device */ static int cx231xx_init_dev(struct cx231xx *dev, struct usb_device *udev, int minor) { int retval = -ENOMEM; unsigned int maxh, maxw; dev->udev = udev; mutex_init(&dev->lock); mutex_init(&dev->ctrl_urb_lock); mutex_init(&dev->gpio_i2c_lock); mutex_init(&dev->i2c_lock); spin_lock_init(&dev->video_mode.slock); spin_lock_init(&dev->vbi_mode.slock); spin_lock_init(&dev->sliced_cc_mode.slock); init_waitqueue_head(&dev->open); init_waitqueue_head(&dev->wait_frame); init_waitqueue_head(&dev->wait_stream); dev->cx231xx_read_ctrl_reg = cx231xx_read_ctrl_reg; dev->cx231xx_write_ctrl_reg = cx231xx_write_ctrl_reg; dev->cx231xx_send_usb_command = cx231xx_send_usb_command; dev->cx231xx_gpio_i2c_read = cx231xx_gpio_i2c_read; dev->cx231xx_gpio_i2c_write = cx231xx_gpio_i2c_write; /* Query cx231xx to find what pcb config it is related to */ retval = initialize_cx231xx(dev); if (retval < 0) { dev_err(dev->dev, "Failed to read PCB config\n"); return retval; } /*To workaround error number=-71 on EP0 for VideoGrabber, need set alt here.*/ if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER || dev->model == CX231XX_BOARD_HAUPPAUGE_USBLIVE2) { cx231xx_set_alt_setting(dev, INDEX_VIDEO, 3); cx231xx_set_alt_setting(dev, INDEX_VANC, 1); } /* Cx231xx pre card setup */ cx231xx_pre_card_setup(dev); retval = cx231xx_config(dev); if (retval) { dev_err(dev->dev, "error configuring device\n"); return -ENOMEM; } /* set default norm */ dev->norm = dev->board.norm; /* register i2c bus */ retval = cx231xx_dev_init(dev); if (retval) { dev_err(dev->dev, "%s: cx231xx_i2c_register - errCode [%d]!\n", __func__, retval); goto err_dev_init; } /* Do board specific init */ cx231xx_card_setup(dev); /* configure the device */ cx231xx_config_i2c(dev); maxw = norm_maxw(dev); maxh = norm_maxh(dev); /* set default image size */ dev->width = maxw; dev->height = maxh; dev->interlaced = 0; dev->video_input = 0; retval = cx231xx_config(dev); if (retval) { dev_err(dev->dev, "%s: cx231xx_config - errCode [%d]!\n", __func__, retval); goto err_dev_init; } /* init video dma queues */ INIT_LIST_HEAD(&dev->video_mode.vidq.active); INIT_LIST_HEAD(&dev->video_mode.vidq.queued); /* init vbi dma queues */ INIT_LIST_HEAD(&dev->vbi_mode.vidq.active); INIT_LIST_HEAD(&dev->vbi_mode.vidq.queued); /* Reset other chips required if they are tied up with GPIO pins */ cx231xx_add_into_devlist(dev); if (dev->board.has_417) { dev_info(dev->dev, "attach 417 %d\n", dev->model); if (cx231xx_417_register(dev) < 0) { dev_err(dev->dev, "%s() Failed to register 417 on VID_B\n", __func__); } } retval = 
cx231xx_register_analog_devices(dev); if (retval) goto err_analog; cx231xx_ir_init(dev); cx231xx_init_extension(dev); return 0; err_analog: cx231xx_unregister_media_device(dev); cx231xx_release_analog_resources(dev); cx231xx_remove_from_devlist(dev); err_dev_init: cx231xx_dev_uninit(dev); return retval; } #if defined(CONFIG_MODULES) && defined(MODULE) static void request_module_async(struct work_struct *work) { struct cx231xx *dev = container_of(work, struct cx231xx, request_module_wk); if (dev->has_alsa_audio) request_module("cx231xx-alsa"); if (dev->board.has_dvb) request_module("cx231xx-dvb"); } static void request_modules(struct cx231xx *dev) { INIT_WORK(&dev->request_module_wk, request_module_async); schedule_work(&dev->request_module_wk); } static void flush_request_modules(struct cx231xx *dev) { flush_work(&dev->request_module_wk); } #else #define request_modules(dev) #define flush_request_modules(dev) #endif /* CONFIG_MODULES */ static int cx231xx_init_v4l2(struct cx231xx *dev, struct usb_device *udev, struct usb_interface *interface, int isoc_pipe) { struct usb_interface *uif; int i, idx; /* Video Init */ /* compute alternate max packet sizes for video */ idx = dev->current_pcb_config.hs_config_info[0].interface_info.video_index + 1; if (idx >= dev->max_iad_interface_count) { dev_err(dev->dev, "Video PCB interface #%d doesn't exist\n", idx); return -ENODEV; } uif = udev->actconfig->interface[idx]; dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress; dev->video_mode.num_alt = uif->num_altsetting; dev_info(dev->dev, "video EndPoint Addr 0x%x, Alternate settings: %i\n", dev->video_mode.end_point_addr, dev->video_mode.num_alt); dev->video_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->video_mode.num_alt, GFP_KERNEL); if (dev->video_mode.alt_max_pkt_size == NULL) return -ENOMEM; for (i = 0; i < dev->video_mode.num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize); dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); dev_dbg(dev->dev, "Alternate setting %i, max size= %i\n", i, dev->video_mode.alt_max_pkt_size[i]); } /* VBI Init */ idx = dev->current_pcb_config.hs_config_info[0].interface_info.vanc_index + 1; if (idx >= dev->max_iad_interface_count) { dev_err(dev->dev, "VBI PCB interface #%d doesn't exist\n", idx); return -ENODEV; } uif = udev->actconfig->interface[idx]; dev->vbi_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc. bEndpointAddress; dev->vbi_mode.num_alt = uif->num_altsetting; dev_info(dev->dev, "VBI EndPoint Addr 0x%x, Alternate settings: %i\n", dev->vbi_mode.end_point_addr, dev->vbi_mode.num_alt); /* compute alternate max packet sizes for vbi */ dev->vbi_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->vbi_mode.num_alt, GFP_KERNEL); if (dev->vbi_mode.alt_max_pkt_size == NULL) return -ENOMEM; for (i = 0; i < dev->vbi_mode.num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. 
desc.wMaxPacketSize); dev->vbi_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); dev_dbg(dev->dev, "Alternate setting %i, max size= %i\n", i, dev->vbi_mode.alt_max_pkt_size[i]); } /* Sliced CC VBI init */ /* compute alternate max packet sizes for sliced CC */ idx = dev->current_pcb_config.hs_config_info[0].interface_info.hanc_index + 1; if (idx >= dev->max_iad_interface_count) { dev_err(dev->dev, "Sliced CC PCB interface #%d doesn't exist\n", idx); return -ENODEV; } uif = udev->actconfig->interface[idx]; dev->sliced_cc_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc. bEndpointAddress; dev->sliced_cc_mode.num_alt = uif->num_altsetting; dev_info(dev->dev, "sliced CC EndPoint Addr 0x%x, Alternate settings: %i\n", dev->sliced_cc_mode.end_point_addr, dev->sliced_cc_mode.num_alt); dev->sliced_cc_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->sliced_cc_mode.num_alt, GFP_KERNEL); if (dev->sliced_cc_mode.alt_max_pkt_size == NULL) return -ENOMEM; for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. desc.wMaxPacketSize); dev->sliced_cc_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); dev_dbg(dev->dev, "Alternate setting %i, max size= %i\n", i, dev->sliced_cc_mode.alt_max_pkt_size[i]); } return 0; } /* * cx231xx_usb_probe() * checks for supported devices */ static int cx231xx_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev; struct device *d = &interface->dev; struct usb_interface *uif; struct cx231xx *dev = NULL; int retval = -ENODEV; int nr = 0, ifnum; int i, isoc_pipe = 0; char *speed; u8 idx; struct usb_interface_assoc_descriptor *assoc_desc; ifnum = interface->altsetting[0].desc.bInterfaceNumber; /* * Interface number 0 - IR interface (handled by mceusb driver) * Interface number 1 - AV interface (handled by this driver) */ if (ifnum != 1) return -ENODEV; /* Check to see next free device and mark as used */ do { nr = find_first_zero_bit(&cx231xx_devused, CX231XX_MAXBOARDS); if (nr >= CX231XX_MAXBOARDS) { /* No free device slots */ dev_err(d, "Supports only %i devices.\n", CX231XX_MAXBOARDS); return -ENOMEM; } } while (test_and_set_bit(nr, &cx231xx_devused)); udev = usb_get_dev(interface_to_usbdev(interface)); /* allocate memory for our device state and initialize it */ dev = devm_kzalloc(&udev->dev, sizeof(*dev), GFP_KERNEL); if (dev == NULL) { retval = -ENOMEM; goto err_if; } snprintf(dev->name, 29, "cx231xx #%d", nr); dev->devno = nr; dev->model = id->driver_info; dev->video_mode.alt = -1; dev->dev = d; cx231xx_set_model(dev); dev->interface_count++; /* reset gpio dir and value */ dev->gpio_dir = 0; dev->gpio_val = 0; dev->xc_fw_load_done = 0; dev->has_alsa_audio = 1; dev->power_mode = -1; atomic_set(&dev->devlist_count, 0); /* 0 - vbi ; 1 -sliced cc mode */ dev->vbi_or_sliced_cc_mode = 0; /* get maximum no.of IAD interfaces */ dev->max_iad_interface_count = udev->config->desc.bNumInterfaces; /* init CIR module TBD */ /*mode_tv: digital=1 or analog=0*/ dev->mode_tv = 0; dev->USE_ISO = transfer_mode; switch (udev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_UNKNOWN: case USB_SPEED_FULL: speed = "12"; break; case USB_SPEED_HIGH: speed = "480"; break; default: speed = "unknown"; } dev_info(d, "New device %s %s @ %s Mbps (%04x:%04x) with %d interfaces\n", udev->manufacturer ? udev->manufacturer : "", udev->product ? 
udev->product : "", speed, le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), dev->max_iad_interface_count); /* increment interface count */ dev->interface_count++; /* get device number */ nr = dev->devno; assoc_desc = udev->actconfig->intf_assoc[0]; if (assoc_desc->bFirstInterface != ifnum) { dev_err(d, "Not found matching IAD interface\n"); retval = -ENODEV; goto err_if; } dev_dbg(d, "registering interface %d\n", ifnum); /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); /* Register the media controller */ cx231xx_media_device_register(dev, udev); /* Create v4l2 device */ #ifdef CONFIG_MEDIA_CONTROLLER dev->v4l2_dev.mdev = dev->media_dev; #endif retval = v4l2_device_register(&interface->dev, &dev->v4l2_dev); if (retval) { dev_err(d, "v4l2_device_register failed\n"); goto err_v4l2; } /* allocate device struct */ retval = cx231xx_init_dev(dev, udev, nr); if (retval) goto err_init; retval = cx231xx_init_v4l2(dev, udev, interface, isoc_pipe); if (retval) goto err_init; if (dev->current_pcb_config.ts1_source != 0xff) { /* compute alternate max packet sizes for TS1 */ idx = dev->current_pcb_config.hs_config_info[0].interface_info.ts1_index + 1; if (idx >= dev->max_iad_interface_count) { dev_err(d, "TS1 PCB interface #%d doesn't exist\n", idx); retval = -ENODEV; goto err_video_alt; } uif = udev->actconfig->interface[idx]; dev->ts1_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe]. desc.bEndpointAddress; dev->ts1_mode.num_alt = uif->num_altsetting; dev_info(d, "TS EndPoint Addr 0x%x, Alternate settings: %i\n", dev->ts1_mode.end_point_addr, dev->ts1_mode.num_alt); dev->ts1_mode.alt_max_pkt_size = devm_kmalloc_array(&udev->dev, 32, dev->ts1_mode.num_alt, GFP_KERNEL); if (dev->ts1_mode.alt_max_pkt_size == NULL) { retval = -ENOMEM; goto err_video_alt; } for (i = 0; i < dev->ts1_mode.num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i]. endpoint[isoc_pipe].desc. wMaxPacketSize); dev->ts1_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); dev_dbg(d, "Alternate setting %i, max size= %i\n", i, dev->ts1_mode.alt_max_pkt_size[i]); } } if (dev->model == CX231XX_BOARD_CNXT_VIDEO_GRABBER) { cx231xx_enable_OSC(dev); cx231xx_reset_out(dev); cx231xx_set_alt_setting(dev, INDEX_VIDEO, 3); } if (dev->model == CX231XX_BOARD_CNXT_RDE_253S) cx231xx_sleep_s5h1432(dev); /* load other modules required */ request_modules(dev); cx231xx_create_media_graph(dev); return 0; err_video_alt: /* cx231xx_uninit_dev: */ cx231xx_close_extension(dev); cx231xx_ir_exit(dev); cx231xx_release_analog_resources(dev); cx231xx_417_unregister(dev); cx231xx_remove_from_devlist(dev); cx231xx_dev_uninit(dev); err_init: v4l2_device_unregister(&dev->v4l2_dev); err_v4l2: usb_set_intfdata(interface, NULL); err_if: usb_put_dev(udev); clear_bit(nr, &cx231xx_devused); return retval; } /* * cx231xx_usb_disconnect() * called when the device gets diconencted * video device will be unregistered on v4l2_close in case it is still open */ static void cx231xx_usb_disconnect(struct usb_interface *interface) { struct cx231xx *dev; dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); if (!dev) return; if (!dev->udev) return; dev->state |= DEV_DISCONNECTED; flush_request_modules(dev); /* wait until all current v4l2 io is finished then deallocate resources */ mutex_lock(&dev->lock); wake_up_interruptible_all(&dev->open); if (dev->users) { dev_warn(dev->dev, "device %s is open! 
Deregistration and memory deallocation are deferred on close.\n", video_device_node_name(&dev->vdev)); /* Even having users, it is safe to remove the RC i2c driver */ cx231xx_ir_exit(dev); if (dev->USE_ISO) cx231xx_uninit_isoc(dev); else cx231xx_uninit_bulk(dev); wake_up_interruptible(&dev->wait_frame); wake_up_interruptible(&dev->wait_stream); } else { } cx231xx_close_extension(dev); mutex_unlock(&dev->lock); if (!dev->users) cx231xx_release_resources(dev); } static struct usb_driver cx231xx_usb_driver = { .name = "cx231xx", .probe = cx231xx_usb_probe, .disconnect = cx231xx_usb_disconnect, .id_table = cx231xx_id_table, }; module_usb_driver(cx231xx_usb_driver);
gpl-2.0
PyYoshi/ponyo_kernel
net/ipv6/raw.c
760
31679
/* * RAW sockets for IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Adapted from linux/net/ipv4/raw.c * * Fixes: * Hideaki YOSHIFUJI : sin6_scope_id support * YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance) * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/slab.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/icmpv6.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <linux/skbuff.h> #include <asm/uaccess.h> #include <asm/ioctls.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/ip6_route.h> #include <net/ip6_checksum.h> #include <net/addrconf.h> #include <net/transp_v6.h> #include <net/udp.h> #include <net/inet_common.h> #include <net/tcp_states.h> #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) #include <net/mip6.h> #endif #include <linux/mroute6.h> #include <net/raw.h> #include <net/rawv6.h> #include <net/xfrm.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> static struct raw_hashinfo raw_v6_hashinfo = { .lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock), }; static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, unsigned short num, struct in6_addr *loc_addr, struct in6_addr *rmt_addr, int dif) { struct hlist_node *node; int is_multicast = ipv6_addr_is_multicast(loc_addr); sk_for_each_from(sk, node) if (inet_sk(sk)->inet_num == num) { struct ipv6_pinfo *np = inet6_sk(sk); if (!net_eq(sock_net(sk), net)) continue; if (!ipv6_addr_any(&np->daddr) && !ipv6_addr_equal(&np->daddr, rmt_addr)) continue; if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) continue; if (!ipv6_addr_any(&np->rcv_saddr)) { if (ipv6_addr_equal(&np->rcv_saddr, loc_addr)) goto found; if (is_multicast && inet6_mc_check(sk, loc_addr, rmt_addr)) goto found; continue; } goto found; } sk = NULL; found: return sk; } /* * 0 - deliver * 1 - block */ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) { struct icmp6hdr *icmph; struct raw6_sock *rp = raw6_sk(sk); if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) { __u32 *data = &rp->filter.data[0]; int bit_nr; icmph = (struct icmp6hdr *) skb->data; bit_nr = icmph->icmp6_type; return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0; } return 0; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) static int (*mh_filter)(struct sock *sock, struct sk_buff *skb); int rawv6_mh_filter_register(int (*filter)(struct sock *sock, struct sk_buff *skb)) { rcu_assign_pointer(mh_filter, filter); return 0; } EXPORT_SYMBOL(rawv6_mh_filter_register); int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock, struct sk_buff *skb)) { rcu_assign_pointer(mh_filter, NULL); synchronize_rcu(); return 0; } EXPORT_SYMBOL(rawv6_mh_filter_unregister); #endif /* * demultiplex raw sockets. * (should consider queueing the skb in the sock receive_queue * without calling rawv6.c) * * Caller owns SKB so we must make clones. 
*/ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) { struct in6_addr *saddr; struct in6_addr *daddr; struct sock *sk; int delivered = 0; __u8 hash; struct net *net; saddr = &ipv6_hdr(skb)->saddr; daddr = saddr + 1; hash = nexthdr & (MAX_INET_PROTOS - 1); read_lock(&raw_v6_hashinfo.lock); sk = sk_head(&raw_v6_hashinfo.ht[hash]); if (sk == NULL) goto out; net = dev_net(skb->dev); sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif); while (sk) { int filtered; delivered = 1; switch (nexthdr) { case IPPROTO_ICMPV6: filtered = icmpv6_filter(sk, skb); break; #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case IPPROTO_MH: { /* XXX: To validate MH only once for each packet, * this is placed here. It should be after checking * xfrm policy, however it doesn't. The checking xfrm * policy is placed in rawv6_rcv() because it is * required for each socket. */ int (*filter)(struct sock *sock, struct sk_buff *skb); filter = rcu_dereference(mh_filter); filtered = filter ? filter(sk, skb) : 0; break; } #endif default: filtered = 0; break; } if (filtered < 0) break; if (filtered == 0) { struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); /* Not releasing hash table! */ if (clone) { nf_reset(clone); rawv6_rcv(sk, clone); } } sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr, IP6CB(skb)->iif); } out: read_unlock(&raw_v6_hashinfo.lock); return delivered; } int raw6_local_deliver(struct sk_buff *skb, int nexthdr) { struct sock *raw_sk; raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]); if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) raw_sk = NULL; return raw_sk != NULL; } /* This cleans up af_inet6 a bit. -DaveM */ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr; __be32 v4addr = 0; int addr_type; int err; if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; addr_type = ipv6_addr_type(&addr->sin6_addr); /* Raw sockets are IPv6 only */ if (addr_type == IPV6_ADDR_MAPPED) return -EADDRNOTAVAIL; lock_sock(sk); err = -EINVAL; if (sk->sk_state != TCP_CLOSE) goto out; rcu_read_lock(); /* Check if the address belongs to the host. */ if (addr_type != IPV6_ADDR_ANY) { struct net_device *dev = NULL; if (addr_type & IPV6_ADDR_LINKLOCAL) { if (addr_len >= sizeof(struct sockaddr_in6) && addr->sin6_scope_id) { /* Override any existing binding, if another * one is supplied by user. */ sk->sk_bound_dev_if = addr->sin6_scope_id; } /* Binding to link-local address requires an interface */ if (!sk->sk_bound_dev_if) goto out_unlock; err = -ENODEV; dev = dev_get_by_index_rcu(sock_net(sk), sk->sk_bound_dev_if); if (!dev) goto out_unlock; } /* ipv4 addr of the socket is invalid. Only the * unspecified and mapped address have a v4 equivalent. 
*/ v4addr = LOOPBACK4_IPV6; if (!(addr_type & IPV6_ADDR_MULTICAST)) { err = -EADDRNOTAVAIL; if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr, dev, 0)) { goto out_unlock; } } } inet->inet_rcv_saddr = inet->inet_saddr = v4addr; ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); if (!(addr_type & IPV6_ADDR_MULTICAST)) ipv6_addr_copy(&np->saddr, &addr->sin6_addr); err = 0; out_unlock: rcu_read_unlock(); out: release_sock(sk); return err; } static void rawv6_err(struct sock *sk, struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); int err; int harderr; /* Report error on raw socket, if: 1. User requested recverr. 2. Socket is connected (otherwise the error indication is useless without recverr and error is hard. */ if (!np->recverr && sk->sk_state != TCP_ESTABLISHED) return; harderr = icmpv6_err_convert(type, code, &err); if (type == ICMPV6_PKT_TOOBIG) harderr = (np->pmtudisc == IPV6_PMTUDISC_DO); if (np->recverr) { u8 *payload = skb->data; if (!inet->hdrincl) payload += offset; ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload); } if (np->recverr || harderr) { sk->sk_err = err; sk->sk_error_report(sk); } } void raw6_icmp_error(struct sk_buff *skb, int nexthdr, u8 type, u8 code, int inner_offset, __be32 info) { struct sock *sk; int hash; struct in6_addr *saddr, *daddr; struct net *net; hash = nexthdr & (RAW_HTABLE_SIZE - 1); read_lock(&raw_v6_hashinfo.lock); sk = sk_head(&raw_v6_hashinfo.ht[hash]); if (sk != NULL) { /* Note: ipv6_hdr(skb) != skb->data */ struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data; saddr = &ip6h->saddr; daddr = &ip6h->daddr; net = dev_net(skb->dev); while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr, IP6CB(skb)->iif))) { rawv6_err(sk, skb, NULL, type, code, inner_offset, info); sk = sk_next(sk); } } read_unlock(&raw_v6_hashinfo.lock); } static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) { if ((raw6_sk(sk)->checksum || sk->sk_filter) && skb_checksum_complete(skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } /* Charge it to the socket. */ if (ip_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); return NET_RX_DROP; } return 0; } /* * This is next to useless... * if we demultiplex in network layer we don't need the extra call * just to queue the skb... * maybe we could have the network decide upon a hint if it * should call raw_rcv for demultiplexing */ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) { struct inet_sock *inet = inet_sk(sk); struct raw6_sock *rp = raw6_sk(sk); if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } if (!rp->checksum) skb->ip_summed = CHECKSUM_UNNECESSARY; if (skb->ip_summed == CHECKSUM_COMPLETE) { skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len, inet->inet_num, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } if (!skb_csum_unnecessary(skb)) skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len, inet->inet_num, 0)); if (inet->hdrincl) { if (skb_checksum_complete(skb)) { atomic_inc(&sk->sk_drops); kfree_skb(skb); return NET_RX_DROP; } } rawv6_rcv_skb(sk, skb); return 0; } /* * This should be easy, if there is something there * we return it, otherwise we block. 
*/ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name; struct sk_buff *skb; size_t copied; int err; if (flags & MSG_OOB) return -EOPNOTSUPP; if (addr_len) *addr_len=sizeof(*sin6); if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len); if (np->rxpmtu && np->rxopt.bits.rxpmtu) return ipv6_recv_rxpmtu(sk, msg, len); skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } if (skb_csum_unnecessary(skb)) { err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else if (msg->msg_flags&MSG_TRUNC) { if (__skb_checksum_complete(skb)) goto csum_copy_err; err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); } else { err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (err) goto out_free; /* Copy the address. */ if (sin6) { sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = 0; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6->sin6_scope_id = IP6CB(skb)->iif; } sock_recv_ts_and_drops(msg, sk, skb); if (np->rxopt.all) datagram_recv_ctl(sk, msg, skb); err = copied; if (flags & MSG_TRUNC) err = skb->len; out_free: skb_free_datagram(sk, skb); out: return err; csum_copy_err: skb_kill_datagram(sk, skb, flags); /* Error for blocking case is chosen to masquerade as some normal condition. */ err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH; goto out; } static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct raw6_sock *rp) { struct sk_buff *skb; int err = 0; int offset; int len; int total_len; __wsum tmp_csum; __sum16 csum; if (!rp->checksum) goto send; if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; offset = rp->offset; total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) - skb->data); if (offset >= total_len - 1) { err = -EINVAL; ip6_flush_pending_frames(sk); goto out; } /* should be check HW csum miyazawa */ if (skb_queue_len(&sk->sk_write_queue) == 1) { /* * Only one fragment on the socket. 
*/ tmp_csum = skb->csum; } else { struct sk_buff *csum_skb = NULL; tmp_csum = 0; skb_queue_walk(&sk->sk_write_queue, skb) { tmp_csum = csum_add(tmp_csum, skb->csum); if (csum_skb) continue; len = skb->len - skb_transport_offset(skb); if (offset >= len) { offset -= len; continue; } csum_skb = skb; } skb = csum_skb; } offset += skb_transport_offset(skb); if (skb_copy_bits(skb, offset, &csum, 2)) BUG(); /* in case cksum was not initialized */ if (unlikely(csum)) tmp_csum = csum_sub(tmp_csum, csum_unfold(csum)); csum = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst, total_len, fl->proto, tmp_csum); if (csum == 0 && fl->proto == IPPROTO_UDP) csum = CSUM_MANGLED_0; if (skb_store_bits(skb, offset, &csum, 2)) BUG(); send: err = ip6_push_pending_frames(sk); out: return err; } static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, struct flowi *fl, struct rt6_info *rt, unsigned int flags) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *iph; struct sk_buff *skb; int err; if (length > rt->u.dst.dev->mtu) { ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu); return -EMSGSIZE; } if (flags&MSG_PROBE) goto out; skb = sock_alloc_send_skb(sk, length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; skb_dst_set(skb, dst_clone(&rt->u.dst)); skb_put(skb, length); skb_reset_network_header(skb); iph = ipv6_hdr(skb); skb->ip_summed = CHECKSUM_NONE; skb->transport_header = skb->network_header; err = memcpy_fromiovecend((void *)iph, from, 0, length); if (err) goto error_fault; IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev, dst_output); if (err > 0) err = net_xmit_errno(err); if (err) goto error; out: return 0; error_fault: err = -EFAULT; kfree_skb(skb); error: IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); if (err == -ENOBUFS && !np->recverr) err = 0; return err; } static int rawv6_probe_proto_opt(struct flowi *fl, struct msghdr *msg) { struct iovec *iov; u8 __user *type = NULL; u8 __user *code = NULL; u8 len = 0; int probed = 0; int i; if (!msg->msg_iov) return 0; for (i = 0; i < msg->msg_iovlen; i++) { iov = &msg->msg_iov[i]; if (!iov) continue; switch (fl->proto) { case IPPROTO_ICMPV6: /* check if one-byte field is readable or not. */ if (iov->iov_base && iov->iov_len < 1) break; if (!type) { type = iov->iov_base; /* check if code field is readable or not. */ if (iov->iov_len > 1) code = type + 1; } else if (!code) code = iov->iov_base; if (type && code) { if (get_user(fl->fl_icmp_type, type) || get_user(fl->fl_icmp_code, code)) return -EFAULT; probed = 1; } break; case IPPROTO_MH: if (iov->iov_base && iov->iov_len < 1) break; /* check if type field is readable or not. 
*/ if (iov->iov_len > 2 - len) { u8 __user *p = iov->iov_base; if (get_user(fl->fl_mh_type, &p[2 - len])) return -EFAULT; probed = 1; } else len += iov->iov_len; break; default: probed = 1; break; } if (probed) break; } return 0; } static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct ipv6_txoptions opt_space; struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name; struct in6_addr *daddr, *final_p = NULL, final; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct raw6_sock *rp = raw6_sk(sk); struct ipv6_txoptions *opt = NULL; struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; struct flowi fl; int addr_len = msg->msg_namelen; int hlimit = -1; int tclass = -1; int dontfrag = -1; u16 proto; int err; /* Rough check on arithmetic overflow, better check is made in ip6_append_data(). */ if (len > INT_MAX) return -EMSGSIZE; /* Mirror BSD error message compatibility */ if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* * Get and verify the address. */ memset(&fl, 0, sizeof(fl)); fl.mark = sk->sk_mark; if (sin6) { if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (sin6->sin6_family && sin6->sin6_family != AF_INET6) return(-EAFNOSUPPORT); /* port is the proto value [0..255] carried in nexthdr */ proto = ntohs(sin6->sin6_port); if (!proto) proto = inet->inet_num; else if (proto != inet->inet_num) return(-EINVAL); if (proto > 255) return(-EINVAL); daddr = &sin6->sin6_addr; if (np->sndflow) { fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) { flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); if (flowlabel == NULL) return -EINVAL; daddr = &flowlabel->dst; } } /* * Otherwise it will be difficult to maintain * sk->sk_dst_cache. 
*/ if (sk->sk_state == TCP_ESTABLISHED && ipv6_addr_equal(daddr, &np->daddr)) daddr = &np->daddr; if (addr_len >= sizeof(struct sockaddr_in6) && sin6->sin6_scope_id && ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL) fl.oif = sin6->sin6_scope_id; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; proto = inet->inet_num; daddr = &np->daddr; fl.fl6_flowlabel = np->flow_label; } if (fl.oif == 0) fl.oif = sk->sk_bound_dev_if; if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; } if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel); if (flowlabel == NULL) return -EINVAL; } if (!(opt->opt_nflen|opt->opt_flen)) opt = NULL; } if (opt == NULL) opt = np->opt; if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); fl.proto = proto; err = rawv6_probe_proto_opt(&fl, msg); if (err) goto out; if (!ipv6_addr_any(daddr)) ipv6_addr_copy(&fl.fl6_dst, daddr); else fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) ipv6_addr_copy(&fl.fl6_src, &np->saddr); /* merge ip6_build_xmit from ip6_output */ if (opt && opt->srcrt) { struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; ipv6_addr_copy(&final, &fl.fl6_dst); ipv6_addr_copy(&fl.fl6_dst, rt0->addr); final_p = &final; } if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) fl.oif = np->mcast_oif; security_sk_classify_flow(sk, &fl); err = ip6_dst_lookup(sk, &dst, &fl); if (err) goto out; if (final_p) ipv6_addr_copy(&fl.fl6_dst, final_p); err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT); if (err < 0) { if (err == -EREMOTE) err = ip6_dst_blackhole(sk, &dst, &fl); if (err < 0) goto out; } if (hlimit < 0) { if (ipv6_addr_is_multicast(&fl.fl6_dst)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); } if (tclass < 0) tclass = np->tclass; if (dontfrag < 0) dontfrag = np->dontfrag; if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: if (inet->hdrincl) { err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags); } else { lock_sock(sk); err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, msg->msg_flags, dontfrag); if (err) ip6_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) err = rawv6_push_pending_frames(sk, &fl, rp); release_sock(sk); } done: dst_release(dst); out: fl6_sock_release(flowlabel); return err<0?err:len; do_confirm: dst_confirm(dst); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto done; } static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, char __user *optval, int optlen) { switch (optname) { case ICMPV6_FILTER: if (optlen > sizeof(struct icmp6_filter)) optlen = sizeof(struct icmp6_filter); if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; default: return -ENOPROTOOPT; } return 0; } static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { int len; switch (optname) { case ICMPV6_FILTER: if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; if (len > sizeof(struct icmp6_filter)) len = 
sizeof(struct icmp6_filter); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) return -EFAULT; return 0; default: return -ENOPROTOOPT; } return 0; } static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct raw6_sock *rp = raw6_sk(sk); int val; if (get_user(val, (int __user *)optval)) return -EFAULT; switch (optname) { case IPV6_CHECKSUM: if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 && level == IPPROTO_IPV6) { /* * RFC3542 tells that IPV6_CHECKSUM socket * option in the IPPROTO_IPV6 level is not * allowed on ICMPv6 sockets. * If you want to set it, use IPPROTO_RAW * level IPV6_CHECKSUM socket option * (Linux extension). */ return -EINVAL; } /* You may get strange result with a positive odd offset; RFC2292bis agrees with me. */ if (val > 0 && (val&1)) return(-EINVAL); if (val < 0) { rp->checksum = 0; } else { rp->checksum = 1; rp->offset = val; } return 0; break; default: return(-ENOPROTOOPT); } } static int rawv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { switch(level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_seticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return ipv6_setsockopt(sk, level, optname, optval, optlen); } return do_rawv6_setsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { switch (level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_seticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); } return do_rawv6_setsockopt(sk, level, optname, optval, optlen); } #endif static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct raw6_sock *rp = raw6_sk(sk); int val, len; if (get_user(len,optlen)) return -EFAULT; switch (optname) { case IPV6_CHECKSUM: /* * We allow getsockopt() for IPPROTO_IPV6-level * IPV6_CHECKSUM socket option on ICMPv6 sockets * since RFC3542 is silent about it. 
*/ if (rp->checksum == 0) val = -1; else val = rp->offset; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, sizeof(int), len); if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval,&val,len)) return -EFAULT; return 0; } static int rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { switch(level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_geticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return ipv6_getsockopt(sk, level, optname, optval, optlen); } return do_rawv6_getsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { switch (level) { case SOL_RAW: break; case SOL_ICMPV6: if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) return -EOPNOTSUPP; return rawv6_geticmpfilter(sk, level, optname, optval, optlen); case SOL_IPV6: if (optname == IPV6_CHECKSUM) break; default: return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); } return do_rawv6_getsockopt(sk, level, optname, optval, optlen); } #endif static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch(cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { struct sk_buff *skb; int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) amount = skb->tail - skb->transport_header; spin_unlock_bh(&sk->sk_receive_queue.lock); return put_user(amount, (int __user *)arg); } default: #ifdef CONFIG_IPV6_MROUTE return ip6mr_ioctl(sk, cmd, (void __user *)arg); #else return -ENOIOCTLCMD; #endif } } static void rawv6_close(struct sock *sk, long timeout) { if (inet_sk(sk)->inet_num == IPPROTO_RAW) ip6_ra_control(sk, -1); ip6mr_sk_done(sk); sk_common_release(sk); } static void raw6_destroy(struct sock *sk) { lock_sock(sk); ip6_flush_pending_frames(sk); release_sock(sk); inet6_destroy_sock(sk); } static int rawv6_init_sk(struct sock *sk) { struct raw6_sock *rp = raw6_sk(sk); switch (inet_sk(sk)->inet_num) { case IPPROTO_ICMPV6: rp->checksum = 1; rp->offset = 2; break; case IPPROTO_MH: rp->checksum = 1; rp->offset = 4; break; default: break; } return(0); } struct proto rawv6_prot = { .name = "RAWv6", .owner = THIS_MODULE, .close = rawv6_close, .destroy = raw6_destroy, .connect = ip6_datagram_connect, .disconnect = udp_disconnect, .ioctl = rawv6_ioctl, .init = rawv6_init_sk, .setsockopt = rawv6_setsockopt, .getsockopt = rawv6_getsockopt, .sendmsg = rawv6_sendmsg, .recvmsg = rawv6_recvmsg, .bind = rawv6_bind, .backlog_rcv = rawv6_rcv_skb, .hash = raw_hash_sk, .unhash = raw_unhash_sk, .obj_size = sizeof(struct raw6_sock), .h.raw_hash = &raw_v6_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_rawv6_setsockopt, .compat_getsockopt = compat_rawv6_getsockopt, #endif }; #ifdef CONFIG_PROC_FS static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) { struct ipv6_pinfo *np = inet6_sk(sp); struct in6_addr *dest, *src; __u16 destp, srcp; dest = &np->daddr; src = &np->rcv_saddr; destp = 0; srcp = inet_sk(sp)->inet_num; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], 
dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops)); } static int raw6_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, " sl " "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" " uid timeout inode ref pointer drops\n"); else raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); return 0; } static const struct seq_operations raw6_seq_ops = { .start = raw_seq_start, .next = raw_seq_next, .stop = raw_seq_stop, .show = raw6_seq_show, }; static int raw6_seq_open(struct inode *inode, struct file *file) { return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops); } static const struct file_operations raw6_seq_fops = { .owner = THIS_MODULE, .open = raw6_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static int __net_init raw6_init_net(struct net *net) { if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops)) return -ENOMEM; return 0; } static void __net_exit raw6_exit_net(struct net *net) { proc_net_remove(net, "raw6"); } static struct pernet_operations raw6_net_ops = { .init = raw6_init_net, .exit = raw6_exit_net, }; int __init raw6_proc_init(void) { return register_pernet_subsys(&raw6_net_ops); } void raw6_proc_exit(void) { unregister_pernet_subsys(&raw6_net_ops); } #endif /* CONFIG_PROC_FS */ /* Same as inet6_dgram_ops, sans udp_poll. */ static const struct proto_ops inet6_sockraw_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, .connect = inet_dgram_connect, /* ok */ .socketpair = sock_no_socketpair, /* a do nothing */ .accept = sock_no_accept, /* a do nothing */ .getname = inet6_getname, .poll = datagram_poll, /* ok */ .ioctl = inet6_ioctl, /* must change */ .listen = sock_no_listen, /* ok */ .shutdown = inet_shutdown, /* ok */ .setsockopt = sock_common_setsockopt, /* ok */ .getsockopt = sock_common_getsockopt, /* ok */ .sendmsg = inet_sendmsg, /* ok */ .recvmsg = sock_common_recvmsg, /* ok */ .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static struct inet_protosw rawv6_protosw = { .type = SOCK_RAW, .protocol = IPPROTO_IP, /* wild card */ .prot = &rawv6_prot, .ops = &inet6_sockraw_ops, .no_check = UDP_CSUM_DEFAULT, .flags = INET_PROTOSW_REUSE, }; int __init rawv6_init(void) { int ret; ret = inet6_register_protosw(&rawv6_protosw); if (ret) goto out; out: return ret; } void rawv6_exit(void) { inet6_unregister_protosw(&rawv6_protosw); }
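The setsockopt()/getsockopt() handlers above expose two per-socket knobs: the RFC 3542 ICMPv6 type filter (SOL_ICMPV6 / ICMPV6_FILTER) and the Linux-specific IPV6_CHECKSUM offset consumed by rawv6_push_pending_frames(). A minimal userspace sketch of how these options are typically exercised follows; error handling is omitted, raw sockets require CAP_NET_RAW, and protocol number 253 is only an example experimental protocol.

#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* ICMPv6 raw socket: kernel checksumming is already on (offset 2). */
	int icmp_fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	struct icmp6_filter filt;

	/* Accept only echo replies; handled by rawv6_seticmpfilter() above. */
	ICMP6_FILTER_SETBLOCKALL(&filt);
	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);
	setsockopt(icmp_fd, IPPROTO_ICMPV6, ICMP6_FILTER, &filt, sizeof(filt));

	/* Experimental protocol: ask the kernel to store a checksum at an
	 * even offset; positive odd offsets are rejected by
	 * do_rawv6_setsockopt(). */
	int raw_fd = socket(AF_INET6, SOCK_RAW, 253);
	int offset = 2;
	setsockopt(raw_fd, IPPROTO_IPV6, IPV6_CHECKSUM, &offset, sizeof(offset));

	close(icmp_fd);
	close(raw_fd);
	return 0;
}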
gpl-2.0
Myself5/android_kernel_sony_msm
mm/memory_hotplug.c
760
49970
/* * linux/mm/memory_hotplug.c * * Copyright (C) */ #include <linux/stddef.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/interrupt.h> #include <linux/pagemap.h> #include <linux/bootmem.h> #include <linux/compiler.h> #include <linux/export.h> #include <linux/pagevec.h> #include <linux/writeback.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/cpu.h> #include <linux/memory.h> #include <linux/memory_hotplug.h> #include <linux/highmem.h> #include <linux/vmalloc.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/migrate.h> #include <linux/page-isolation.h> #include <linux/pfn.h> #include <linux/suspend.h> #include <linux/mm_inline.h> #include <linux/firmware-map.h> #include <linux/stop_machine.h> #include <asm/tlbflush.h> #include "internal.h" /* * online_page_callback contains pointer to current page onlining function. * Initially it is generic_online_page(). If it is required it could be * changed by calling set_online_page_callback() for callback registration * and restore_online_page_callback() for generic callback restore. */ static void generic_online_page(struct page *page); static online_page_callback_t online_page_callback = generic_online_page; DEFINE_MUTEX(mem_hotplug_mutex); void lock_memory_hotplug(void) { mutex_lock(&mem_hotplug_mutex); /* for exclusive hibernation if CONFIG_HIBERNATION=y */ lock_system_sleep(); } void unlock_memory_hotplug(void) { unlock_system_sleep(); mutex_unlock(&mem_hotplug_mutex); } /* add this memory to iomem resource */ static struct resource *register_memory_resource(u64 start, u64 size) { struct resource *res; res = kzalloc(sizeof(struct resource), GFP_KERNEL); BUG_ON(!res); res->name = "System RAM"; res->start = start; res->end = start + size - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; if (request_resource(&iomem_resource, res) < 0) { printk("System RAM resource %pR cannot be added\n", res); kfree(res); res = NULL; } return res; } static void release_memory_resource(struct resource *res) { if (!res) return; release_resource(res); kfree(res); return; } #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE void get_page_bootmem(unsigned long info, struct page *page, unsigned long type) { page->lru.next = (struct list_head *) type; SetPagePrivate(page); set_page_private(page, info); atomic_inc(&page->_count); } /* reference to __meminit __free_pages_bootmem is valid * so use __ref to tell modpost not to generate a warning */ void __ref put_page_bootmem(struct page *page) { unsigned long type; static DEFINE_MUTEX(ppb_lock); type = (unsigned long) page->lru.next; BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE); if (atomic_dec_return(&page->_count) == 1) { ClearPagePrivate(page); set_page_private(page, 0); INIT_LIST_HEAD(&page->lru); /* * Please refer to comment for __free_pages_bootmem() * for why we serialize here. 
*/ mutex_lock(&ppb_lock); __free_pages_bootmem(page, 0); mutex_unlock(&ppb_lock); totalram_pages++; } } #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE #ifndef CONFIG_SPARSEMEM_VMEMMAP static void register_page_bootmem_info_section(unsigned long start_pfn) { unsigned long *usemap, mapsize, page_mapsize, section_nr, i, j; struct mem_section *ms; struct page *page, *memmap, *page_page; int memmap_page_valid; section_nr = pfn_to_section_nr(start_pfn); ms = __nr_to_section(section_nr); /* Get section's memmap address */ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); /* * Get page for the memmap's phys address * XXX: need more consideration for sparse_vmemmap... */ page = virt_to_page(memmap); mapsize = sizeof(struct page) * PAGES_PER_SECTION; mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT; page_mapsize = PAGE_SIZE/sizeof(struct page); /* remember memmap's page, except those that reference only holes */ for (i = 0; i < mapsize; i++, page++) { memmap_page_valid = 0; page_page = __va(page_to_pfn(page) << PAGE_SHIFT); for (j = 0; j < page_mapsize; j++, page_page++) { if (early_pfn_valid(page_to_pfn(page_page))) { memmap_page_valid = 1; break; } } if (memmap_page_valid) get_page_bootmem(section_nr, page, SECTION_INFO); } usemap = __nr_to_section(section_nr)->pageblock_flags; page = virt_to_page(usemap); mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT; for (i = 0; i < mapsize; i++, page++) get_page_bootmem(section_nr, page, MIX_SECTION_INFO); } #else /* CONFIG_SPARSEMEM_VMEMMAP */ static void register_page_bootmem_info_section(unsigned long start_pfn) { unsigned long *usemap, mapsize, section_nr, i; struct mem_section *ms; struct page *page, *memmap; if (!pfn_valid(start_pfn)) return; section_nr = pfn_to_section_nr(start_pfn); ms = __nr_to_section(section_nr); memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION); usemap = __nr_to_section(section_nr)->pageblock_flags; page = virt_to_page(usemap); mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT; for (i = 0; i < mapsize; i++, page++) get_page_bootmem(section_nr, page, MIX_SECTION_INFO); } #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ void register_page_bootmem_info_node(struct pglist_data *pgdat) { unsigned long i, pfn, end_pfn, nr_pages; int node = pgdat->node_id; struct page *page; struct zone *zone; nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT; page = virt_to_page(pgdat); for (i = 0; i < nr_pages; i++, page++) get_page_bootmem(node, page, NODE_INFO); zone = &pgdat->node_zones[0]; for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) { if (zone->wait_table) { nr_pages = zone->wait_table_hash_nr_entries * sizeof(wait_queue_head_t); nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT; page = virt_to_page(zone->wait_table); for (i = 0; i < nr_pages; i++, page++) get_page_bootmem(node, page, NODE_INFO); } } pfn = pgdat->node_start_pfn; end_pfn = pgdat_end_pfn(pgdat); /* register_section info */ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { /* * Some platforms can assign the same pfn to multiple nodes - on * node0 as well as nodeN. To avoid registering a pfn against * multiple nodes we check that this pfn does not already * reside in some other node. 
*/ if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node)) register_page_bootmem_info_section(pfn); } } #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */ static void grow_zone_span(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { unsigned long old_zone_end_pfn; zone_span_writelock(zone); old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn) zone->zone_start_pfn = start_pfn; zone->spanned_pages = max(old_zone_end_pfn, end_pfn) - zone->zone_start_pfn; zone_span_writeunlock(zone); } static void resize_zone(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { zone_span_writelock(zone); if (end_pfn - start_pfn) { zone->zone_start_pfn = start_pfn; zone->spanned_pages = end_pfn - start_pfn; } else { /* * make it consist as free_area_init_core(), * if spanned_pages = 0, then keep start_pfn = 0 */ zone->zone_start_pfn = 0; zone->spanned_pages = 0; } zone_span_writeunlock(zone); } static void fix_zone_id(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { enum zone_type zid = zone_idx(zone); int nid = zone->zone_pgdat->node_id; unsigned long pfn; for (pfn = start_pfn; pfn < end_pfn; pfn++) set_page_links(pfn_to_page(pfn), zid, nid, pfn); } /* Can fail with -ENOMEM from allocating a wait table with vmalloc() or * alloc_bootmem_node_nopanic() */ static int __ref ensure_zone_is_initialized(struct zone *zone, unsigned long start_pfn, unsigned long num_pages) { if (!zone_is_initialized(zone)) return init_currently_empty_zone(zone, start_pfn, num_pages, MEMMAP_HOTPLUG); return 0; } static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2, unsigned long start_pfn, unsigned long end_pfn) { int ret; unsigned long flags; unsigned long z1_start_pfn; ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn); if (ret) return ret; pgdat_resize_lock(z1->zone_pgdat, &flags); /* can't move pfns which are higher than @z2 */ if (end_pfn > zone_end_pfn(z2)) goto out_fail; /* the move out part mast at the left most of @z2 */ if (start_pfn > z2->zone_start_pfn) goto out_fail; /* must included/overlap */ if (end_pfn <= z2->zone_start_pfn) goto out_fail; /* use start_pfn for z1's start_pfn if z1 is empty */ if (z1->spanned_pages) z1_start_pfn = z1->zone_start_pfn; else z1_start_pfn = start_pfn; resize_zone(z1, z1_start_pfn, end_pfn); resize_zone(z2, end_pfn, zone_end_pfn(z2)); pgdat_resize_unlock(z1->zone_pgdat, &flags); fix_zone_id(z1, start_pfn, end_pfn); return 0; out_fail: pgdat_resize_unlock(z1->zone_pgdat, &flags); return -1; } static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2, unsigned long start_pfn, unsigned long end_pfn) { int ret; unsigned long flags; unsigned long z2_end_pfn; ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn); if (ret) return ret; pgdat_resize_lock(z1->zone_pgdat, &flags); /* can't move pfns which are lower than @z1 */ if (z1->zone_start_pfn > start_pfn) goto out_fail; /* the move out part mast at the right most of @z1 */ if (zone_end_pfn(z1) > end_pfn) goto out_fail; /* must included/overlap */ if (start_pfn >= zone_end_pfn(z1)) goto out_fail; /* use end_pfn for z2's end_pfn if z2 is empty */ if (z2->spanned_pages) z2_end_pfn = zone_end_pfn(z2); else z2_end_pfn = end_pfn; resize_zone(z1, z1->zone_start_pfn, start_pfn); resize_zone(z2, start_pfn, z2_end_pfn); pgdat_resize_unlock(z1->zone_pgdat, &flags); fix_zone_id(z2, start_pfn, end_pfn); return 0; out_fail: pgdat_resize_unlock(z1->zone_pgdat, &flags); return -1; } 
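/*
 * Note: move_pfn_range_left()/move_pfn_range_right() above re-home a
 * [start_pfn, end_pfn) range that sits on the boundary between two adjacent
 * zones: _left pulls the range off the start of @z2 into @z1, while _right
 * pushes it off the end of @z1 into @z2. online_pages() uses them to satisfy
 * ONLINE_KERNEL / ONLINE_MOVABLE requests when the pages currently belong to
 * the neighbouring zone; both return -1 when the requested range does not
 * sit on the zone boundary.
 */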
static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn, unsigned long end_pfn) { unsigned long old_pgdat_end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages; if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn) pgdat->node_start_pfn = start_pfn; pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) - pgdat->node_start_pfn; } static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn) { struct pglist_data *pgdat = zone->zone_pgdat; int nr_pages = PAGES_PER_SECTION; int nid = pgdat->node_id; int zone_type; unsigned long flags; int ret; zone_type = zone - pgdat->node_zones; ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages); if (ret) return ret; pgdat_resize_lock(zone->zone_pgdat, &flags); grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages); grow_pgdat_span(zone->zone_pgdat, phys_start_pfn, phys_start_pfn + nr_pages); pgdat_resize_unlock(zone->zone_pgdat, &flags); memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn, MEMMAP_HOTPLUG); return 0; } static int __meminit __add_section(int nid, struct zone *zone, unsigned long phys_start_pfn) { int nr_pages = PAGES_PER_SECTION; int ret; if (pfn_valid(phys_start_pfn)) return -EEXIST; ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages); if (ret < 0) return ret; ret = __add_zone(zone, phys_start_pfn); if (ret < 0) return ret; return register_new_memory(nid, __pfn_to_section(phys_start_pfn)); } /* * Reasonably generic function for adding memory. It is * expected that archs that support memory hotplug will * call this function after deciding the zone to which to * add the new pages. */ int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn, unsigned long nr_pages) { unsigned long i; int err = 0; int start_sec, end_sec; /* during initialize mem_map, align hot-added range to section */ start_sec = pfn_to_section_nr(phys_start_pfn); end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); for (i = start_sec; i <= end_sec; i++) { err = __add_section(nid, zone, i << PFN_SECTION_SHIFT); /* * EEXIST is finally dealt with by ioresource collision * check. see add_memory() => register_memory_resource() * Warning will be printed if there is collision. */ if (err && (err != -EEXIST)) break; err = 0; } return err; } EXPORT_SYMBOL_GPL(__add_pages); #ifdef CONFIG_MEMORY_HOTREMOVE /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ static int find_smallest_section_pfn(int nid, struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { struct mem_section *ms; for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) { ms = __pfn_to_section(start_pfn); if (unlikely(!valid_section(ms))) continue; if (unlikely(pfn_to_nid(start_pfn) != nid)) continue; if (zone && zone != page_zone(pfn_to_page(start_pfn))) continue; return start_pfn; } return 0; } /* find the biggest valid pfn in the range [start_pfn, end_pfn). */ static int find_biggest_section_pfn(int nid, struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { struct mem_section *ms; unsigned long pfn; /* pfn is the end pfn of a memory section. 
*/ pfn = end_pfn - 1; for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) { ms = __pfn_to_section(pfn); if (unlikely(!valid_section(ms))) continue; if (unlikely(pfn_to_nid(pfn) != nid)) continue; if (zone && zone != page_zone(pfn_to_page(pfn))) continue; return pfn; } return 0; } static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, unsigned long end_pfn) { unsigned long zone_start_pfn = zone->zone_start_pfn; unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; unsigned long pfn; struct mem_section *ms; int nid = zone_to_nid(zone); zone_span_writelock(zone); if (zone_start_pfn == start_pfn) { /* * If the section is smallest section in the zone, it need * shrink zone->zone_start_pfn and zone->zone_spanned_pages. * In this case, we find second smallest valid mem_section * for shrinking zone. */ pfn = find_smallest_section_pfn(nid, zone, end_pfn, zone_end_pfn); if (pfn) { zone->zone_start_pfn = pfn; zone->spanned_pages = zone_end_pfn - pfn; } } else if (zone_end_pfn == end_pfn) { /* * If the section is biggest section in the zone, it need * shrink zone->spanned_pages. * In this case, we find second biggest valid mem_section for * shrinking zone. */ pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn, start_pfn); if (pfn) zone->spanned_pages = pfn - zone_start_pfn + 1; } /* * The section is not biggest or smallest mem_section in the zone, it * only creates a hole in the zone. So in this case, we need not * change the zone. But perhaps, the zone has only hole data. Thus * it check the zone has only hole or not. */ pfn = zone_start_pfn; for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) { ms = __pfn_to_section(pfn); if (unlikely(!valid_section(ms))) continue; if (page_zone(pfn_to_page(pfn)) != zone) continue; /* If the section is current section, it continues the loop */ if (start_pfn == pfn) continue; /* If we find valid section, we have nothing to do */ zone_span_writeunlock(zone); return; } /* The zone has no valid section */ zone->zone_start_pfn = 0; zone->spanned_pages = 0; zone_span_writeunlock(zone); } static void shrink_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn, unsigned long end_pfn) { unsigned long pgdat_start_pfn = pgdat->node_start_pfn; unsigned long pgdat_end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages; unsigned long pfn; struct mem_section *ms; int nid = pgdat->node_id; if (pgdat_start_pfn == start_pfn) { /* * If the section is smallest section in the pgdat, it need * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages. * In this case, we find second smallest valid mem_section * for shrinking zone. */ pfn = find_smallest_section_pfn(nid, NULL, end_pfn, pgdat_end_pfn); if (pfn) { pgdat->node_start_pfn = pfn; pgdat->node_spanned_pages = pgdat_end_pfn - pfn; } } else if (pgdat_end_pfn == end_pfn) { /* * If the section is biggest section in the pgdat, it need * shrink pgdat->node_spanned_pages. * In this case, we find second biggest valid mem_section for * shrinking zone. */ pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn, start_pfn); if (pfn) pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1; } /* * If the section is not biggest or smallest mem_section in the pgdat, * it only creates a hole in the pgdat. So in this case, we need not * change the pgdat. * But perhaps, the pgdat has only hole data. Thus it check the pgdat * has only hole or not. 
*/ pfn = pgdat_start_pfn; for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) { ms = __pfn_to_section(pfn); if (unlikely(!valid_section(ms))) continue; if (pfn_to_nid(pfn) != nid) continue; /* If the section is current section, it continues the loop */ if (start_pfn == pfn) continue; /* If we find valid section, we have nothing to do */ return; } /* The pgdat has no valid section */ pgdat->node_start_pfn = 0; pgdat->node_spanned_pages = 0; } static void __remove_zone(struct zone *zone, unsigned long start_pfn) { struct pglist_data *pgdat = zone->zone_pgdat; int nr_pages = PAGES_PER_SECTION; int zone_type; unsigned long flags; zone_type = zone - pgdat->node_zones; pgdat_resize_lock(zone->zone_pgdat, &flags); shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages); pgdat_resize_unlock(zone->zone_pgdat, &flags); } static int __remove_section(struct zone *zone, struct mem_section *ms) { unsigned long start_pfn; int scn_nr; int ret = -EINVAL; if (!valid_section(ms)) return ret; ret = unregister_memory_section(ms); if (ret) return ret; scn_nr = __section_nr(ms); start_pfn = section_nr_to_pfn(scn_nr); __remove_zone(zone, start_pfn); sparse_remove_one_section(zone, ms); return 0; } /** * __remove_pages() - remove sections of pages from a zone * @zone: zone from which pages need to be removed * @phys_start_pfn: starting pageframe (must be aligned to start of a section) * @nr_pages: number of pages to remove (must be multiple of section size) * * Generic helper function to remove section mappings and sysfs entries * for the section of the memory we are removing. Caller needs to make * sure that pages are marked reserved and zones are adjust properly by * calling offline_pages(). */ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, unsigned long nr_pages) { unsigned long i; int sections_to_remove; resource_size_t start, size; int ret = 0; /* * We can only remove entire sections */ BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK); BUG_ON(nr_pages % PAGES_PER_SECTION); start = phys_start_pfn << PAGE_SHIFT; size = nr_pages * PAGE_SIZE; ret = release_mem_region_adjustable(&iomem_resource, start, size); if (ret) { resource_size_t endres = start + size - 1; pr_warn("Unable to release resource <%pa-%pa> (%d)\n", &start, &endres, ret); } sections_to_remove = nr_pages / PAGES_PER_SECTION; for (i = 0; i < sections_to_remove; i++) { unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION; ret = __remove_section(zone, __pfn_to_section(pfn)); if (ret) break; } return ret; } EXPORT_SYMBOL_GPL(__remove_pages); #endif /* CONFIG_MEMORY_HOTREMOVE */ int set_online_page_callback(online_page_callback_t callback) { int rc = -EINVAL; lock_memory_hotplug(); if (online_page_callback == generic_online_page) { online_page_callback = callback; rc = 0; } unlock_memory_hotplug(); return rc; } EXPORT_SYMBOL_GPL(set_online_page_callback); int restore_online_page_callback(online_page_callback_t callback) { int rc = -EINVAL; lock_memory_hotplug(); if (online_page_callback == callback) { online_page_callback = generic_online_page; rc = 0; } unlock_memory_hotplug(); return rc; } EXPORT_SYMBOL_GPL(restore_online_page_callback); void __online_page_set_limits(struct page *page) { unsigned long pfn = page_to_pfn(page); totalram_pages++; #ifdef CONFIG_FIX_MOVABLE_ZONE if (zone_idx(page_zone(page)) != ZONE_MOVABLE) total_unmovable_pages++; #endif if (pfn >= num_physpages) num_physpages = pfn + 1; } EXPORT_SYMBOL_GPL(__online_page_set_limits); void 
__online_page_increment_counters(struct page *page) { totalram_pages++; #ifdef CONFIG_HIGHMEM if (PageHighMem(page)) totalhigh_pages++; #endif } EXPORT_SYMBOL_GPL(__online_page_increment_counters); void __online_page_free(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); } EXPORT_SYMBOL_GPL(__online_page_free); static void generic_online_page(struct page *page) { __online_page_set_limits(page); __online_page_increment_counters(page); __online_page_free(page); } static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages, void *arg) { unsigned long i; unsigned long onlined_pages = *(unsigned long *)arg; struct page *page; if (PageReserved(pfn_to_page(start_pfn))) for (i = 0; i < nr_pages; i++) { page = pfn_to_page(start_pfn + i); (*online_page_callback)(page); onlined_pages++; } *(unsigned long *)arg = onlined_pages; return 0; } #ifdef CONFIG_MOVABLE_NODE /* * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have * normal memory. */ static bool can_online_high_movable(struct zone *zone) { return true; } #else /* CONFIG_MOVABLE_NODE */ /* ensure every online node has NORMAL memory */ static bool can_online_high_movable(struct zone *zone) { return node_state(zone_to_nid(zone), N_NORMAL_MEMORY); } #endif /* CONFIG_MOVABLE_NODE */ /* check which state of node_states will be changed when online memory */ static void node_states_check_changes_online(unsigned long nr_pages, struct zone *zone, struct memory_notify *arg) { int nid = zone_to_nid(zone); enum zone_type zone_last = ZONE_NORMAL; /* * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY] * contains nodes which have zones of 0...ZONE_NORMAL, * set zone_last to ZONE_NORMAL. * * If we don't have HIGHMEM nor movable node, * node_states[N_NORMAL_MEMORY] contains nodes which have zones of * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE. */ if (N_MEMORY == N_NORMAL_MEMORY) zone_last = ZONE_MOVABLE; /* * if the memory to be online is in a zone of 0...zone_last, and * the zones of 0...zone_last don't have memory before online, we will * need to set the node to node_states[N_NORMAL_MEMORY] after * the memory is online. */ if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY)) arg->status_change_nid_normal = nid; else arg->status_change_nid_normal = -1; #ifdef CONFIG_HIGHMEM /* * If we have movable node, node_states[N_HIGH_MEMORY] * contains nodes which have zones of 0...ZONE_HIGHMEM, * set zone_last to ZONE_HIGHMEM. * * If we don't have movable node, node_states[N_NORMAL_MEMORY] * contains nodes which have zones of 0...ZONE_MOVABLE, * set zone_last to ZONE_MOVABLE. */ zone_last = ZONE_HIGHMEM; if (N_MEMORY == N_HIGH_MEMORY) zone_last = ZONE_MOVABLE; if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY)) arg->status_change_nid_high = nid; else arg->status_change_nid_high = -1; #else arg->status_change_nid_high = arg->status_change_nid_normal; #endif /* * if the node don't have memory befor online, we will need to * set the node to node_states[N_MEMORY] after the memory * is online. 
*/ if (!node_state(nid, N_MEMORY)) arg->status_change_nid = nid; else arg->status_change_nid = -1; } static void node_states_set_node(int node, struct memory_notify *arg) { if (arg->status_change_nid_normal >= 0) node_set_state(node, N_NORMAL_MEMORY); if (arg->status_change_nid_high >= 0) node_set_state(node, N_HIGH_MEMORY); node_set_state(node, N_MEMORY); } int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type) { unsigned long onlined_pages = 0; struct zone *zone; int need_zonelists_rebuild = 0; int nid; int ret; struct memory_notify arg; lock_memory_hotplug(); /* * This doesn't need a lock to do pfn_to_page(). * The section can't be removed here because of the * memory_block->state_mutex. */ zone = page_zone(pfn_to_page(pfn)); if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) && !can_online_high_movable(zone)) { unlock_memory_hotplug(); return -1; } if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) { if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) { unlock_memory_hotplug(); return -1; } } if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) { if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) { unlock_memory_hotplug(); return -1; } } /* Previous code may changed the zone of the pfn range */ zone = page_zone(pfn_to_page(pfn)); arg.start_pfn = pfn; arg.nr_pages = nr_pages; node_states_check_changes_online(nr_pages, zone, &arg); nid = page_to_nid(pfn_to_page(pfn)); ret = memory_notify(MEM_GOING_ONLINE, &arg); ret = notifier_to_errno(ret); if (ret) { memory_notify(MEM_CANCEL_ONLINE, &arg); unlock_memory_hotplug(); return ret; } /* * If this zone is not populated, then it is not in zonelist. * This means the page allocator ignores this zone. * So, zonelist must be updated after online. 
*/ mutex_lock(&zonelists_mutex); if (!populated_zone(zone)) { need_zonelists_rebuild = 1; build_all_zonelists(NULL, zone); } ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages, online_pages_range); if (ret) { if (need_zonelists_rebuild) zone_pcp_reset(zone); mutex_unlock(&zonelists_mutex); printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n", (unsigned long long) pfn << PAGE_SHIFT, (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); memory_notify(MEM_CANCEL_ONLINE, &arg); unlock_memory_hotplug(); return ret; } zone->managed_pages += onlined_pages; zone->present_pages += onlined_pages; zone->zone_pgdat->node_present_pages += onlined_pages; if (onlined_pages) { drain_all_pages(); node_states_set_node(zone_to_nid(zone), &arg); if (need_zonelists_rebuild) build_all_zonelists(NULL, NULL); else zone_pcp_update(zone); } mutex_unlock(&zonelists_mutex); init_per_zone_wmark_min(); if (onlined_pages) kswapd_run(zone_to_nid(zone)); vm_total_pages = nr_free_pagecache_pages(); writeback_set_ratelimit(); if (onlined_pages) memory_notify(MEM_ONLINE, &arg); unlock_memory_hotplug(); return 0; } #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) { struct pglist_data *pgdat; unsigned long zones_size[MAX_NR_ZONES] = {0}; unsigned long zholes_size[MAX_NR_ZONES] = {0}; unsigned long start_pfn = start >> PAGE_SHIFT; pgdat = NODE_DATA(nid); if (!pgdat) { pgdat = arch_alloc_nodedata(nid); if (!pgdat) return NULL; arch_refresh_nodedata(nid, pgdat); } /* we can use NODE_DATA(nid) from here */ /* init node's zones as empty zones, we don't have any present pages.*/ free_area_init_node(nid, zones_size, start_pfn, zholes_size); /* * The node we allocated has no zone fallback lists. For avoiding * to access not-initialized zonelist, build here. */ mutex_lock(&zonelists_mutex); build_all_zonelists(pgdat, NULL); mutex_unlock(&zonelists_mutex); return pgdat; } static void rollback_node_hotadd(int nid, pg_data_t *pgdat) { arch_refresh_nodedata(nid, NULL); arch_free_nodedata(pgdat); return; } /* * called by cpu_up() to online a node without onlined memory. */ int mem_online_node(int nid) { pg_data_t *pgdat; int ret; lock_memory_hotplug(); pgdat = hotadd_new_pgdat(nid, 0); if (!pgdat) { ret = -ENOMEM; goto out; } node_set_online(nid); ret = register_one_node(nid); BUG_ON(ret); out: unlock_memory_hotplug(); return ret; } /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ int __ref add_memory(int nid, u64 start, u64 size) { pg_data_t *pgdat = NULL; bool new_pgdat; bool new_node; struct resource *res; int ret; lock_memory_hotplug(); res = register_memory_resource(start, size); ret = -EEXIST; if (!res) goto out; { /* Stupid hack to suppress address-never-null warning */ void *p = NODE_DATA(nid); new_pgdat = !p; } new_node = !node_online(nid); if (new_node) { pgdat = hotadd_new_pgdat(nid, start); ret = -ENOMEM; if (!pgdat) goto error; } /* call arch's memory hotadd */ ret = arch_add_memory(nid, start, size); if (ret < 0) goto error; /* we online node here. we can't roll back from here. */ node_set_online(nid); if (new_node) { ret = register_one_node(nid); /* * If sysfs file of new node can't create, cpu on the node * can't be hot-added. There is no rollback way now. * So, check by BUG_ON() to catch it reluctantly.. 
*/ BUG_ON(ret); } /* create new memmap entry */ firmware_map_add_hotplug(start, start + size, "System RAM"); goto out; error: /* rollback pgdat allocation and others */ if (new_pgdat) rollback_node_hotadd(nid, pgdat); release_memory_resource(res); out: unlock_memory_hotplug(); return ret; } EXPORT_SYMBOL_GPL(add_memory); int __ref physical_remove_memory(u64 start, u64 size) { int ret; struct resource *res, *res_old; res = kzalloc(sizeof(struct resource), GFP_KERNEL); BUG_ON(!res); ret = arch_physical_remove_memory(start, size); if (!ret) { kfree(res); return 0; } res->name = "System RAM"; res->start = start; res->end = start + size - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; res_old = locate_resource(&iomem_resource, res); if (res_old) { release_resource(res_old); if (PageSlab(virt_to_head_page(res_old))) kfree(res_old); } kfree(res); return ret; } EXPORT_SYMBOL_GPL(physical_remove_memory); int __ref physical_active_memory(u64 start, u64 size) { int ret; ret = arch_physical_active_memory(start, size); return ret; } EXPORT_SYMBOL_GPL(physical_active_memory); int __ref physical_low_power_memory(u64 start, u64 size) { int ret; ret = arch_physical_low_power_memory(start, size); return ret; } EXPORT_SYMBOL_GPL(physical_low_power_memory); #ifdef CONFIG_MEMORY_HOTREMOVE /* * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy * set and the size of the free page is given by page_order(). Using this, * the function determines if the pageblock contains only free pages. * Due to buddy contraints, a free page at least the size of a pageblock will * be located at the start of the pageblock */ static inline int pageblock_free(struct page *page) { return PageBuddy(page) && page_order(page) >= pageblock_order; } /* Return the start of the next active pageblock after a given page */ static struct page *next_active_pageblock(struct page *page) { /* Ensure the starting page is pageblock-aligned */ BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); /* If the entire pageblock is free, move to the end of free page */ if (pageblock_free(page)) { int order; /* be careful. we don't have locks, page_order can be changed.*/ order = page_order(page); if ((order < MAX_ORDER) && (order >= pageblock_order)) return page + (1 << order); } return page + pageblock_nr_pages; } /* Checks if this range of memory is likely to be hot-removable. */ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) { struct page *page = pfn_to_page(start_pfn); struct page *end_page = page + nr_pages; /* Check the starting page of each pageblock within the range */ for (; page < end_page; page = next_active_pageblock(page)) { if (!is_pageblock_removable_nolock(page)) return 0; cond_resched(); } /* All pageblocks in the memory block are likely to be hot-removable */ return 1; } /* * Confirm all pages in a range [start, end) is belongs to the same zone. */ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; struct zone *zone = NULL; struct page *page; int i; for (pfn = start_pfn; pfn < end_pfn; pfn += MAX_ORDER_NR_PAGES) { i = 0; /* This is just a CONFIG_HOLES_IN_ZONE check.*/ while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i)) i++; if (i == MAX_ORDER_NR_PAGES) continue; page = pfn_to_page(pfn + i); if (zone && page_zone(page) != zone) return 0; zone = page_zone(page); } return 1; } /* * Scanning pfn is much easier than scanning lru list. * Scan pfn from start to end and Find LRU page. 
*/ static unsigned long scan_lru_pages(unsigned long start, unsigned long end) { unsigned long pfn; struct page *page; for (pfn = start; pfn < end; pfn++) { if (pfn_valid(pfn)) { page = pfn_to_page(pfn); if (PageLRU(page)) return pfn; } } return 0; } #define NR_OFFLINE_AT_ONCE_PAGES (256) static int do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; struct page *page; int move_pages = NR_OFFLINE_AT_ONCE_PAGES; int not_managed = 0; int ret = 0; LIST_HEAD(source); for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) { if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); if (!get_page_unless_zero(page)) continue; /* * We can skip free pages. And we can only deal with pages on * LRU. */ ret = isolate_lru_page(page); if (!ret) { /* Success */ put_page(page); list_add_tail(&page->lru, &source); move_pages--; inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); } else { #ifdef CONFIG_DEBUG_VM printk(KERN_ALERT "removing pfn %lx from LRU failed\n", pfn); dump_page(page); #endif put_page(page); /* Because we don't have big zone->lock. we should check this again here. */ if (page_count(page)) { not_managed++; ret = -EBUSY; break; } } } if (!list_empty(&source)) { if (not_managed) { putback_lru_pages(&source); goto out; } /* * alloc_migrate_target should be improooooved!! * migrate_pages returns # of failed pages. */ ret = migrate_pages(&source, alloc_migrate_target, 0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG); if (ret) putback_lru_pages(&source); } out: return ret; } /* * remove from free_area[] and mark all as Reserved. */ static int offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, void *data) { __offline_isolated_pages(start, start + nr_pages); return 0; } static void offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) { walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL, offline_isolated_pages_cb); } /* * Check all pages in range, recoreded as memory resource, are isolated. */ static int check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, void *data) { int ret; long offlined = *(long *)data; ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true); offlined = nr_pages; if (!ret) *(long *)data += offlined; return ret; } static long check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) { long offlined = 0; int ret; ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined, check_pages_isolated_cb); if (ret < 0) offlined = (long)ret; return offlined; } #ifdef CONFIG_MOVABLE_NODE /* * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have * normal memory. */ static bool can_offline_normal(struct zone *zone, unsigned long nr_pages) { return true; } #else /* CONFIG_MOVABLE_NODE */ /* ensure the node has NORMAL memory if it is still online */ static bool can_offline_normal(struct zone *zone, unsigned long nr_pages) { struct pglist_data *pgdat = zone->zone_pgdat; unsigned long present_pages = 0; enum zone_type zt; for (zt = 0; zt <= ZONE_NORMAL; zt++) present_pages += pgdat->node_zones[zt].present_pages; if (present_pages > nr_pages) return true; present_pages = 0; for (; zt <= ZONE_MOVABLE; zt++) present_pages += pgdat->node_zones[zt].present_pages; /* * we can't offline the last normal memory until all * higher memory is offlined. 
*/ return present_pages == 0; } #endif /* CONFIG_MOVABLE_NODE */ /* check which state of node_states will be changed when offline memory */ static void node_states_check_changes_offline(unsigned long nr_pages, struct zone *zone, struct memory_notify *arg) { struct pglist_data *pgdat = zone->zone_pgdat; unsigned long present_pages = 0; enum zone_type zt, zone_last = ZONE_NORMAL; /* * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY] * contains nodes which have zones of 0...ZONE_NORMAL, * set zone_last to ZONE_NORMAL. * * If we don't have HIGHMEM nor movable node, * node_states[N_NORMAL_MEMORY] contains nodes which have zones of * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE. */ if (N_MEMORY == N_NORMAL_MEMORY) zone_last = ZONE_MOVABLE; /* * check whether node_states[N_NORMAL_MEMORY] will be changed. * If the memory to be offline is in a zone of 0...zone_last, * and it is the last present memory, 0...zone_last will * become empty after offline , thus we can determind we will * need to clear the node from node_states[N_NORMAL_MEMORY]. */ for (zt = 0; zt <= zone_last; zt++) present_pages += pgdat->node_zones[zt].present_pages; if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) arg->status_change_nid_normal = zone_to_nid(zone); else arg->status_change_nid_normal = -1; #ifdef CONFIG_HIGHMEM /* * If we have movable node, node_states[N_HIGH_MEMORY] * contains nodes which have zones of 0...ZONE_HIGHMEM, * set zone_last to ZONE_HIGHMEM. * * If we don't have movable node, node_states[N_NORMAL_MEMORY] * contains nodes which have zones of 0...ZONE_MOVABLE, * set zone_last to ZONE_MOVABLE. */ zone_last = ZONE_HIGHMEM; if (N_MEMORY == N_HIGH_MEMORY) zone_last = ZONE_MOVABLE; for (; zt <= zone_last; zt++) present_pages += pgdat->node_zones[zt].present_pages; if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) arg->status_change_nid_high = zone_to_nid(zone); else arg->status_change_nid_high = -1; #else arg->status_change_nid_high = arg->status_change_nid_normal; #endif /* * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE */ zone_last = ZONE_MOVABLE; /* * check whether node_states[N_HIGH_MEMORY] will be changed * If we try to offline the last present @nr_pages from the node, * we can determind we will need to clear the node from * node_states[N_HIGH_MEMORY]. */ for (; zt <= zone_last; zt++) present_pages += pgdat->node_zones[zt].present_pages; if (nr_pages >= present_pages) arg->status_change_nid = zone_to_nid(zone); else arg->status_change_nid = -1; } static void node_states_clear_node(int node, struct memory_notify *arg) { if (arg->status_change_nid_normal >= 0) node_clear_state(node, N_NORMAL_MEMORY); if ((N_MEMORY != N_NORMAL_MEMORY) && (arg->status_change_nid_high >= 0)) node_clear_state(node, N_HIGH_MEMORY); if ((N_MEMORY != N_HIGH_MEMORY) && (arg->status_change_nid >= 0)) node_clear_state(node, N_MEMORY); } static int __ref __offline_pages(unsigned long start_pfn, unsigned long end_pfn, unsigned long timeout) { unsigned long pfn, nr_pages, expire; long offlined_pages; int ret, drain, retry_max, node; struct zone *zone; struct memory_notify arg; BUG_ON(start_pfn >= end_pfn); /* at least, alignment against pageblock is necessary */ if (!IS_ALIGNED(start_pfn, pageblock_nr_pages)) return -EINVAL; if (!IS_ALIGNED(end_pfn, pageblock_nr_pages)) return -EINVAL; /* This makes hotplug much easier...and readable. we assume this for now. 
.*/ if (!test_pages_in_a_zone(start_pfn, end_pfn)) return -EINVAL; lock_memory_hotplug(); zone = page_zone(pfn_to_page(start_pfn)); node = zone_to_nid(zone); nr_pages = end_pfn - start_pfn; ret = -EINVAL; if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages)) goto out; /* set above range as isolated */ ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, true); if (ret) goto out; arg.start_pfn = start_pfn; arg.nr_pages = nr_pages; node_states_check_changes_offline(nr_pages, zone, &arg); ret = memory_notify(MEM_GOING_OFFLINE, &arg); ret = notifier_to_errno(ret); if (ret) goto failed_removal; pfn = start_pfn; expire = jiffies + timeout; drain = 0; retry_max = 5; repeat: /* start memory hot removal */ ret = -EAGAIN; if (time_after(jiffies, expire)) goto failed_removal; ret = -EINTR; if (signal_pending(current)) goto failed_removal; ret = 0; if (drain) { lru_add_drain_all(); cond_resched(); drain_all_pages(); } pfn = scan_lru_pages(start_pfn, end_pfn); if (pfn) { /* We have page on LRU */ ret = do_migrate_range(pfn, end_pfn); if (!ret) { drain = 1; goto repeat; } else { if (ret < 0) if (--retry_max == 0) goto failed_removal; yield(); drain = 1; goto repeat; } } /* drain all zone's lru pagevec, this is asynchronous... */ lru_add_drain_all(); yield(); /* drain pcp pages, this is synchronous. */ drain_all_pages(); /* check again */ offlined_pages = check_pages_isolated(start_pfn, end_pfn); if (offlined_pages < 0) { ret = -EBUSY; goto failed_removal; } printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages); /* Ok, all of our target is isolated. We cannot do rollback at this point. */ offline_isolated_pages(start_pfn, end_pfn); /* reset pagetype flags and makes migrate type to be MOVABLE */ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); /* removal success */ zone->managed_pages -= offlined_pages; if (offlined_pages > zone->present_pages) zone->present_pages = 0; else zone->present_pages -= offlined_pages; zone->zone_pgdat->node_present_pages -= offlined_pages; totalram_pages -= offlined_pages; #ifdef CONFIG_FIX_MOVABLE_ZONE if (zone_idx(zone) != ZONE_MOVABLE) total_unmovable_pages -= offlined_pages; #endif init_per_zone_wmark_min(); if (!populated_zone(zone)) { zone_pcp_reset(zone); mutex_lock(&zonelists_mutex); build_all_zonelists(NULL, NULL); mutex_unlock(&zonelists_mutex); } else zone_pcp_update(zone); node_states_clear_node(node, &arg); if (arg.status_change_nid >= 0) kswapd_stop(node); vm_total_pages = nr_free_pagecache_pages(); writeback_set_ratelimit(); memory_notify(MEM_OFFLINE, &arg); unlock_memory_hotplug(); return 0; failed_removal: printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n", (unsigned long long) start_pfn << PAGE_SHIFT, ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); memory_notify(MEM_CANCEL_OFFLINE, &arg); /* pushback to free area */ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); out: unlock_memory_hotplug(); return ret; } int offline_pages(unsigned long start_pfn, unsigned long nr_pages) { return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ); } /** * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn) * @start_pfn: start pfn of the memory range * @end_pfn: end pfn of the memory range * @arg: argument passed to func * @func: callback for each memory section walked * * This function walks through all present mem sections in range * [start_pfn, end_pfn) and call func on each mem section. * * Returns the return value of func. 
*/ static int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, void *arg, int (*func)(struct memory_block *, void *)) { struct memory_block *mem = NULL; struct mem_section *section; unsigned long pfn, section_nr; int ret; for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { section_nr = pfn_to_section_nr(pfn); if (!present_section_nr(section_nr)) continue; section = __nr_to_section(section_nr); /* same memblock? */ if (mem) if ((section_nr >= mem->start_section_nr) && (section_nr <= mem->end_section_nr)) continue; mem = find_memory_block_hinted(section, mem); if (!mem) continue; ret = func(mem, arg); if (ret) { kobject_put(&mem->dev.kobj); return ret; } } if (mem) kobject_put(&mem->dev.kobj); return 0; } /** * offline_memory_block_cb - callback function for offlining memory block * @mem: the memory block to be offlined * @arg: buffer to hold error msg * * Always return 0, and put the error msg in arg if any. */ static int offline_memory_block_cb(struct memory_block *mem, void *arg) { int *ret = arg; int error = offline_memory_block(mem); if (error != 0 && *ret == 0) *ret = error; return 0; } static int is_memblock_offlined_cb(struct memory_block *mem, void *arg) { int ret = !is_memblock_offlined(mem); if (unlikely(ret)) { phys_addr_t beginpa, endpa; beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1; pr_warn("removing memory fails, because memory " "[%pa-%pa] is onlined\n", &beginpa, &endpa); } return ret; } static int check_cpu_on_node(void *data) { struct pglist_data *pgdat = data; int cpu; for_each_present_cpu(cpu) { if (cpu_to_node(cpu) == pgdat->node_id) /* * the cpu on this node isn't removed, and we can't * offline this node. */ return -EBUSY; } return 0; } static void unmap_cpu_on_node(void *data) { #ifdef CONFIG_ACPI_NUMA struct pglist_data *pgdat = data; int cpu; for_each_possible_cpu(cpu) if (cpu_to_node(cpu) == pgdat->node_id) numa_clear_node(cpu); #endif } static int check_and_unmap_cpu_on_node(void *data) { int ret = check_cpu_on_node(data); if (ret) return ret; /* * the node will be offlined when we come here, so we can clear * the cpu_to_node() now. */ unmap_cpu_on_node(data); return 0; } /* offline the node if all memory sections of this node are removed */ void try_offline_node(int nid) { pg_data_t *pgdat = NODE_DATA(nid); unsigned long start_pfn = pgdat->node_start_pfn; unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; unsigned long pfn; struct page *pgdat_page = virt_to_page(pgdat); int i; for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { unsigned long section_nr = pfn_to_section_nr(pfn); if (!present_section_nr(section_nr)) continue; if (pfn_to_nid(pfn) != nid) continue; /* * some memory sections of this node are not removed, and we * can't offline node now. */ return; } if (stop_machine(check_and_unmap_cpu_on_node, pgdat, NULL)) return; /* * all memory/cpu of this node are removed, we can offline this * node now. */ node_set_offline(nid); unregister_one_node(nid); if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page)) /* node data is allocated from boot memory */ return; /* free waittable in each zone */ for (i = 0; i < MAX_NR_ZONES; i++) { struct zone *zone = pgdat->node_zones + i; /* * wait_table may be allocated from boot memory, * here only free if it's allocated by vmalloc. 
*/ if (is_vmalloc_addr(zone->wait_table)) vfree(zone->wait_table); } /* * Since there is no way to guarentee the address of pgdat/zone is not * on stack of any kernel threads or used by other kernel objects * without reference counting or other symchronizing method, do not * reset node_data and free pgdat here. Just reset it to 0 and reuse * the memory when the node is online again. */ memset(pgdat, 0, sizeof(*pgdat)); } EXPORT_SYMBOL(try_offline_node); int __ref remove_memory(int nid, u64 start, u64 size) { unsigned long start_pfn, end_pfn; int ret = 0; int retry = 1; start_pfn = PFN_DOWN(start); end_pfn = PFN_UP(start + size - 1); /* * When CONFIG_MEMCG is on, one memory block may be used by other * blocks to store page cgroup when onlining pages. But we don't know * in what order pages are onlined. So we iterate twice to offline * memory: * 1st iterate: offline every non primary memory block. * 2nd iterate: offline primary (i.e. first added) memory block. */ repeat: walk_memory_range(start_pfn, end_pfn, &ret, offline_memory_block_cb); if (ret) { if (!retry) return ret; retry = 0; ret = 0; goto repeat; } lock_memory_hotplug(); /* * we have offlined all memory blocks like this: * 1. lock memory hotplug * 2. offline a memory block * 3. unlock memory hotplug * * repeat step1-3 to offline the memory block. All memory blocks * must be offlined before removing memory. But we don't hold the * lock in the whole operation. So we should check whether all * memory blocks are offlined. */ ret = walk_memory_range(start_pfn, end_pfn, NULL, is_memblock_offlined_cb); if (ret) { unlock_memory_hotplug(); return ret; } /* remove memmap entry */ firmware_map_remove(start, start + size, "System RAM"); arch_remove_memory(start, size); try_offline_node(nid); unlock_memory_hotplug(); return 0; } #else int offline_pages(unsigned long start_pfn, unsigned long nr_pages) { return -EINVAL; } int remove_memory(int nid, u64 start, u64 size) { return -EINVAL; } #endif /* CONFIG_MEMORY_HOTREMOVE */ EXPORT_SYMBOL_GPL(remove_memory);
gpl-2.0
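The walk_memory_range()/callback pattern used by remove_memory() above is easier to see in isolation. Below is a minimal, self-contained userspace sketch (not kernel code: PAGES_PER_SECTION here is an arbitrary illustrative value, and dummy_block/walk_range/count_cb are made-up names) of the same idea — visit a pfn range one section-sized step at a time, hand each block to a callback, and abort on the first non-zero return, mirroring how the kernel loop stops and propagates the callback's error.

#include <stdio.h>

#define PAGES_PER_SECTION 32768UL	/* illustrative only; the real value is arch-dependent */

struct dummy_block { unsigned long start_section, end_section; };

/* Simplified model of walk_memory_range(): call func once per block and
 * propagate the first non-zero return value, which aborts the walk. */
static int walk_range(unsigned long start_pfn, unsigned long end_pfn,
		      void *arg, int (*func)(struct dummy_block *, void *))
{
	struct dummy_block blk;
	unsigned long pfn;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		blk.start_section = pfn / PAGES_PER_SECTION;
		blk.end_section = blk.start_section;	/* one section per block here */
		ret = func(&blk, arg);
		if (ret)
			return ret;
	}
	return 0;
}

/* Trivial callback that only counts visited blocks.  Contrast with
 * offline_memory_block_cb() above, which stashes the first error in *arg and
 * always returns 0 so that every remaining block still gets walked. */
static int count_cb(struct dummy_block *blk, void *arg)
{
	(void)blk;
	(*(int *)arg)++;
	return 0;
}

int main(void)
{
	int sections = 0;

	walk_range(0, 4 * PAGES_PER_SECTION, &sections, count_cb);
	printf("visited %d sections\n", sections);	/* prints 4 */
	return 0;
}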
spacecaker/Stock_spacecaker_kernel
net/mac80211/debugfs_sta.c
760
11358
/* * Copyright 2003-2005 Devicescape Software, Inc. * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/debugfs.h> #include <linux/ieee80211.h> #include "ieee80211_i.h" #include "debugfs.h" #include "debugfs_sta.h" #include "sta_info.h" /* sta attributtes */ #define STA_READ(name, buflen, field, format_string) \ static ssize_t sta_ ##name## _read(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ int res; \ struct sta_info *sta = file->private_data; \ char buf[buflen]; \ res = scnprintf(buf, buflen, format_string, sta->field); \ return simple_read_from_buffer(userbuf, count, ppos, buf, res); \ } #define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n") #define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n") #define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n") #define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") #define STA_OPS(name) \ static const struct file_operations sta_ ##name## _ops = { \ .read = sta_##name##_read, \ .open = mac80211_open_file_generic, \ } #define STA_OPS_RW(name) \ static const struct file_operations sta_ ##name## _ops = { \ .read = sta_##name##_read, \ .write = sta_##name##_write, \ .open = mac80211_open_file_generic, \ } #define STA_FILE(name, field, format) \ STA_READ_##format(name, field) \ STA_OPS(name) STA_FILE(aid, sta.aid, D); STA_FILE(dev, sdata->name, S); STA_FILE(rx_packets, rx_packets, LU); STA_FILE(tx_packets, tx_packets, LU); STA_FILE(rx_bytes, rx_bytes, LU); STA_FILE(tx_bytes, tx_bytes, LU); STA_FILE(rx_duplicates, num_duplicates, LU); STA_FILE(rx_fragments, rx_fragments, LU); STA_FILE(rx_dropped, rx_dropped, LU); STA_FILE(tx_fragments, tx_fragments, LU); STA_FILE(tx_filtered, tx_filtered_count, LU); STA_FILE(tx_retry_failed, tx_retry_failed, LU); STA_FILE(tx_retry_count, tx_retry_count, LU); STA_FILE(last_signal, last_signal, D); STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); static ssize_t sta_flags_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[100]; struct sta_info *sta = file->private_data; u32 staflags = get_sta_flags(sta); int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s", staflags & WLAN_STA_AUTH ? "AUTH\n" : "", staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "", staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "", staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "", staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "", staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "", staflags & WLAN_STA_WME ? "WME\n" : "", staflags & WLAN_STA_WDS ? "WDS\n" : "", staflags & WLAN_STA_MFP ? 
"MFP\n" : ""); return simple_read_from_buffer(userbuf, count, ppos, buf, res); } STA_OPS(flags); static ssize_t sta_num_ps_buf_frames_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[20]; struct sta_info *sta = file->private_data; int res = scnprintf(buf, sizeof(buf), "%u\n", skb_queue_len(&sta->ps_tx_buf)); return simple_read_from_buffer(userbuf, count, ppos, buf, res); } STA_OPS(num_ps_buf_frames); static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[20]; struct sta_info *sta = file->private_data; int res = scnprintf(buf, sizeof(buf), "%d\n", jiffies_to_msecs(jiffies - sta->last_rx)); return simple_read_from_buffer(userbuf, count, ppos, buf, res); } STA_OPS(inactive_ms); static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[15*NUM_RX_DATA_QUEUES], *p = buf; int i; struct sta_info *sta = file->private_data; for (i = 0; i < NUM_RX_DATA_QUEUES; i++) p += scnprintf(p, sizeof(buf)+buf-p, "%x ", le16_to_cpu(sta->last_seq_ctrl[i])); p += scnprintf(p, sizeof(buf)+buf-p, "\n"); return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); } STA_OPS(last_seq_ctrl); static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { char buf[71 + STA_TID_NUM * 40], *p = buf; int i; struct sta_info *sta = file->private_data; spin_lock_bh(&sta->lock); p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", sta->ampdu_mlme.dialog_token_allocator + 1); p += scnprintf(p, sizeof(buf) + buf - p, "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); for (i = 0; i < STA_TID_NUM; i++) { p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", sta->ampdu_mlme.tid_active_rx[i]); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", sta->ampdu_mlme.tid_active_rx[i] ? sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", sta->ampdu_mlme.tid_active_rx[i] ? sta->ampdu_mlme.tid_rx[i]->ssn : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", sta->ampdu_mlme.tid_state_tx[i]); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", sta->ampdu_mlme.tid_state_tx[i] ? sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", sta->ampdu_mlme.tid_state_tx[i] ? sta->ampdu_mlme.tid_tx[i]->ssn : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", sta->ampdu_mlme.tid_state_tx[i] ? 
skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); p += scnprintf(p, sizeof(buf) + buf - p, "\n"); } spin_unlock_bh(&sta->lock); return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); } static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { char _buf[12], *buf = _buf; struct sta_info *sta = file->private_data; bool start, tx; unsigned long tid; int ret; if (count > sizeof(_buf)) return -EINVAL; if (copy_from_user(buf, userbuf, count)) return -EFAULT; buf[sizeof(_buf) - 1] = '\0'; if (strncmp(buf, "tx ", 3) == 0) { buf += 3; tx = true; } else if (strncmp(buf, "rx ", 3) == 0) { buf += 3; tx = false; } else return -EINVAL; if (strncmp(buf, "start ", 6) == 0) { buf += 6; start = true; if (!tx) return -EINVAL; } else if (strncmp(buf, "stop ", 5) == 0) { buf += 5; start = false; } else return -EINVAL; tid = simple_strtoul(buf, NULL, 0); if (tid >= STA_TID_NUM) return -EINVAL; if (tx) { if (start) ret = ieee80211_start_tx_ba_session(&sta->sta, tid); else ret = ieee80211_stop_tx_ba_session(&sta->sta, tid, WLAN_BACK_RECIPIENT); } else { __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3); ret = 0; } return ret ?: count; } STA_OPS_RW(agg_status); static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { #define PRINT_HT_CAP(_cond, _str) \ do { \ if (_cond) \ p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \ } while (0) char buf[512], *p = buf; int i; struct sta_info *sta = file->private_data; struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap; p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n", htc->ht_supported ? "" : "not "); if (htc->ht_supported) { p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC"); PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save"); PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save"); PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled"); PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield"); PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI"); PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI"); PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC"); PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC"); PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream"); PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams"); PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams"); PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack"); PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: " "3839 bytes"); PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: " "7935 bytes"); /* * For beacons and probe response this would mean the BSS * does or does not allow the usage of DSSS/CCK HT40. * Otherwise it means the STA does or does not use * DSSS/CCK HT40. 
*/ PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40"); PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40"); /* BIT(13) is reserved */ PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant"); PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection"); p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n", htc->ampdu_factor, htc->ampdu_density); p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:"); for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) p += scnprintf(p, sizeof(buf)+buf-p, " %.2x", htc->mcs.rx_mask[i]); p += scnprintf(p, sizeof(buf)+buf-p, "\n"); /* If not set this is meaningless */ if (le16_to_cpu(htc->mcs.rx_highest)) { p += scnprintf(p, sizeof(buf)+buf-p, "MCS rx highest: %d Mbps\n", le16_to_cpu(htc->mcs.rx_highest)); } p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n", htc->mcs.tx_params); } return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); } STA_OPS(ht_capa); #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, \ sta->debugfs.dir, sta, &sta_ ##name## _ops); void ieee80211_sta_debugfs_add(struct sta_info *sta) { struct dentry *stations_dir = sta->local->debugfs.stations; u8 mac[3*ETH_ALEN]; sta->debugfs.add_has_run = true; if (!stations_dir) return; snprintf(mac, sizeof(mac), "%pM", sta->sta.addr); /* * This might fail due to a race condition: * When mac80211 unlinks a station, the debugfs entries * remain, but it is already possible to link a new * station with the same address which triggers adding * it to debugfs; therefore, if the old station isn't * destroyed quickly enough the old station's debugfs * dir might still be around. */ sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); if (!sta->debugfs.dir) return; DEBUGFS_ADD(flags); DEBUGFS_ADD(num_ps_buf_frames); DEBUGFS_ADD(inactive_ms); DEBUGFS_ADD(last_seq_ctrl); DEBUGFS_ADD(agg_status); DEBUGFS_ADD(dev); DEBUGFS_ADD(rx_packets); DEBUGFS_ADD(tx_packets); DEBUGFS_ADD(rx_bytes); DEBUGFS_ADD(tx_bytes); DEBUGFS_ADD(rx_duplicates); DEBUGFS_ADD(rx_fragments); DEBUGFS_ADD(rx_dropped); DEBUGFS_ADD(tx_fragments); DEBUGFS_ADD(tx_filtered); DEBUGFS_ADD(tx_retry_failed); DEBUGFS_ADD(tx_retry_count); DEBUGFS_ADD(last_signal); DEBUGFS_ADD(wep_weak_iv_count); DEBUGFS_ADD(ht_capa); } void ieee80211_sta_debugfs_remove(struct sta_info *sta) { debugfs_remove_recursive(sta->debugfs.dir); sta->debugfs.dir = NULL; }
gpl-2.0
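Several of the read handlers in debugfs_sta.c above build their output with the idiom p += scnprintf(p, sizeof(buf) + buf - p, ...), recomputing the remaining space from the buffer start on every append and finally handing p - buf bytes to simple_read_from_buffer(). A small userspace approximation of just that accumulation step (it uses snprintf(), which unlike kernel scnprintf() returns the would-be length on truncation, so this sketch assumes the output fits in the buffer):

#include <stdio.h>

int main(void)
{
	char buf[64], *p = buf;
	int i;

	/* Each append advances p; "sizeof(buf) + buf - p" is the space left. */
	p += snprintf(p, sizeof(buf) + buf - p, "TID\tactive\n");
	for (i = 0; i < 4; i++)
		p += snprintf(p, sizeof(buf) + buf - p, "%02d\t%x\n", i, i & 1);

	/* p - buf is the formatted length; the kernel handlers pass exactly
	 * this value to simple_read_from_buffer(). */
	fwrite(buf, 1, (size_t)(p - buf), stdout);
	return 0;
}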
sktjdgns1189/android_kernel_samsung_SHW-M190S-4.2.2
drivers/media/video/gspca/t613.c
760
36086
/* * V4L2 by Jean-Francois Moine <http://moinejf.free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * *Notes: * t613 + tas5130A * * Focus to light do not balance well as in win. * Quality in win is not good, but its kinda better. * * Fix some "extraneous bytes", most of apps will show the image anyway * * Gamma table, is there, but its really doing something? * * 7~8 Fps, its ok, max on win its 10. * Costantino Leandro */ #define MODULE_NAME "t613" #include "gspca.h" #define V4L2_CID_EFFECTS (V4L2_CID_PRIVATE_BASE + 0) MODULE_AUTHOR("Leandro Costantino <le_costantino@pixartargentina.com.ar>"); MODULE_DESCRIPTION("GSPCA/T613 (JPEG Compliance) USB Camera Driver"); MODULE_LICENSE("GPL"); struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ u8 brightness; u8 contrast; u8 colors; u8 autogain; u8 gamma; u8 sharpness; u8 freq; u8 red_balance; /* split balance */ u8 blue_balance; u8 global_gain; /* aka gain */ u8 whitebalance; /* set default r/g/b and activate */ u8 mirror; u8 effect; u8 sensor; #define SENSOR_OM6802 0 #define SENSOR_OTHER 1 #define SENSOR_TAS5130A 2 #define SENSOR_LT168G 3 /* must verify if this is the actual model */ }; /* V4L2 controls supported by the driver */ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val); static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val); static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setlowlight(struct gspca_dev *gspca_dev, __s32 val); static int sd_getlowlight(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val); static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val); static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val); static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setwhitebalance(struct gspca_dev *gspca_dev, __s32 val); static int sd_getwhitebalance(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val); static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val); static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setglobal_gain(struct gspca_dev *gspca_dev, __s32 val); static int sd_getglobal_gain(struct gspca_dev *gspca_dev, __s32 *val); static int sd_setflip(struct gspca_dev *gspca_dev, __s32 val); static int sd_getflip(struct gspca_dev *gspca_dev, __s32 *val); static int sd_seteffect(struct gspca_dev *gspca_dev, __s32 val); static 
int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val); static int sd_querymenu(struct gspca_dev *gspca_dev, struct v4l2_querymenu *menu); static const struct ctrl sd_ctrls[] = { { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 14, .step = 1, #define BRIGHTNESS_DEF 8 .default_value = BRIGHTNESS_DEF, }, .set = sd_setbrightness, .get = sd_getbrightness, }, { { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, .maximum = 0x0d, .step = 1, #define CONTRAST_DEF 0x07 .default_value = CONTRAST_DEF, }, .set = sd_setcontrast, .get = sd_getcontrast, }, { { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Color", .minimum = 0, .maximum = 0x0f, .step = 1, #define COLORS_DEF 0x05 .default_value = COLORS_DEF, }, .set = sd_setcolors, .get = sd_getcolors, }, #define GAMMA_MAX 16 #define GAMMA_DEF 10 { { .id = V4L2_CID_GAMMA, /* (gamma on win) */ .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gamma", .minimum = 0, .maximum = GAMMA_MAX - 1, .step = 1, .default_value = GAMMA_DEF, }, .set = sd_setgamma, .get = sd_getgamma, }, { { .id = V4L2_CID_BACKLIGHT_COMPENSATION, /* Activa lowlight, * some apps dont bring up the * backligth_compensation control) */ .type = V4L2_CTRL_TYPE_INTEGER, .name = "Low Light", .minimum = 0, .maximum = 1, .step = 1, #define AUTOGAIN_DEF 0x01 .default_value = AUTOGAIN_DEF, }, .set = sd_setlowlight, .get = sd_getlowlight, }, { { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mirror Image", .minimum = 0, .maximum = 1, .step = 1, #define MIRROR_DEF 0 .default_value = MIRROR_DEF, }, .set = sd_setflip, .get = sd_getflip }, { { .id = V4L2_CID_POWER_LINE_FREQUENCY, .type = V4L2_CTRL_TYPE_MENU, .name = "Light Frequency Filter", .minimum = 1, /* 1 -> 0x50, 2->0x60 */ .maximum = 2, .step = 1, #define FREQ_DEF 1 .default_value = FREQ_DEF, }, .set = sd_setfreq, .get = sd_getfreq}, { { .id = V4L2_CID_AUTO_WHITE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "White Balance", .minimum = 0, .maximum = 1, .step = 1, #define WHITE_BALANCE_DEF 0 .default_value = WHITE_BALANCE_DEF, }, .set = sd_setwhitebalance, .get = sd_getwhitebalance }, { { .id = V4L2_CID_SHARPNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Sharpness", .minimum = 0, .maximum = 15, .step = 1, #define SHARPNESS_DEF 0x06 .default_value = SHARPNESS_DEF, }, .set = sd_setsharpness, .get = sd_getsharpness, }, { { .id = V4L2_CID_EFFECTS, .type = V4L2_CTRL_TYPE_MENU, .name = "Webcam Effects", .minimum = 0, .maximum = 4, .step = 1, #define EFFECTS_DEF 0 .default_value = EFFECTS_DEF, }, .set = sd_seteffect, .get = sd_geteffect }, { { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Blue Balance", .minimum = 0x10, .maximum = 0x40, .step = 1, #define BLUE_BALANCE_DEF 0x20 .default_value = BLUE_BALANCE_DEF, }, .set = sd_setblue_balance, .get = sd_getblue_balance, }, { { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Red Balance", .minimum = 0x10, .maximum = 0x40, .step = 1, #define RED_BALANCE_DEF 0x20 .default_value = RED_BALANCE_DEF, }, .set = sd_setred_balance, .get = sd_getred_balance, }, { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gain", .minimum = 0x10, .maximum = 0x40, .step = 1, #define global_gain_DEF 0x20 .default_value = global_gain_DEF, }, .set = sd_setglobal_gain, .get = sd_getglobal_gain, }, }; static char *effects_control[] = { "Normal", "Emboss", /* disabled */ "Monochrome", "Sepia", "Sketch", "Sun Effect", /* disabled */ "Negative", 
}; static const struct v4l2_pix_format vga_mode_t16[] = { {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 4}, {176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 3}, {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 2}, {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; /* sensor specific data */ struct additional_sensor_data { const u8 n3[6]; const u8 *n4, n4sz; const u8 reg80, reg8e; const u8 nset8[6]; const u8 data1[10]; const u8 data2[9]; const u8 data3[9]; const u8 data4[4]; const u8 data5[6]; const u8 stream[4]; }; static const u8 n4_om6802[] = { 0x09, 0x01, 0x12, 0x04, 0x66, 0x8a, 0x80, 0x3c, 0x81, 0x22, 0x84, 0x50, 0x8a, 0x78, 0x8b, 0x68, 0x8c, 0x88, 0x8e, 0x33, 0x8f, 0x24, 0xaa, 0xb1, 0xa2, 0x60, 0xa5, 0x30, 0xa6, 0x3a, 0xa8, 0xe8, 0xae, 0x05, 0xb1, 0x00, 0xbb, 0x04, 0xbc, 0x48, 0xbe, 0x36, 0xc6, 0x88, 0xe9, 0x00, 0xc5, 0xc0, 0x65, 0x0a, 0xbb, 0x86, 0xaf, 0x58, 0xb0, 0x68, 0x87, 0x40, 0x89, 0x2b, 0x8d, 0xff, 0x83, 0x40, 0xac, 0x84, 0xad, 0x86, 0xaf, 0x46 }; static const u8 n4_other[] = { 0x66, 0x00, 0x7f, 0x00, 0x80, 0xac, 0x81, 0x69, 0x84, 0x40, 0x85, 0x70, 0x86, 0x20, 0x8a, 0x68, 0x8b, 0x58, 0x8c, 0x88, 0x8d, 0xff, 0x8e, 0xb8, 0x8f, 0x28, 0xa2, 0x60, 0xa5, 0x40, 0xa8, 0xa8, 0xac, 0x84, 0xad, 0x84, 0xae, 0x24, 0xaf, 0x56, 0xb0, 0x68, 0xb1, 0x00, 0xb2, 0x88, 0xbb, 0xc5, 0xbc, 0x4a, 0xbe, 0x36, 0xc2, 0x88, 0xc5, 0xc0, 0xc6, 0xda, 0xe9, 0x26, 0xeb, 0x00 }; static const u8 n4_tas5130a[] = { 0x80, 0x3c, 0x81, 0x68, 0x83, 0xa0, 0x84, 0x20, 0x8a, 0x68, 0x8b, 0x58, 0x8c, 0x88, 0x8e, 0xb4, 0x8f, 0x24, 0xa1, 0xb1, 0xa2, 0x30, 0xa5, 0x10, 0xa6, 0x4a, 0xae, 0x03, 0xb1, 0x44, 0xb2, 0x08, 0xb7, 0x06, 0xb9, 0xe7, 0xbb, 0xc4, 0xbc, 0x4a, 0xbe, 0x36, 0xbf, 0xff, 0xc2, 0x88, 0xc5, 0xc8, 0xc6, 0xda }; static const u8 n4_lt168g[] = { 0x66, 0x01, 0x7f, 0x00, 0x80, 0x7c, 0x81, 0x28, 0x83, 0x44, 0x84, 0x20, 0x86, 0x20, 0x8a, 0x70, 0x8b, 0x58, 0x8c, 0x88, 0x8d, 0xa0, 0x8e, 0xb3, 0x8f, 0x24, 0xa1, 0xb0, 0xa2, 0x38, 0xa5, 0x20, 0xa6, 0x4a, 0xa8, 0xe8, 0xaf, 0x38, 0xb0, 0x68, 0xb1, 0x44, 0xb2, 0x88, 0xbb, 0x86, 0xbd, 0x40, 0xbe, 0x26, 0xc1, 0x05, 0xc2, 0x88, 0xc5, 0xc0, 0xda, 0x8e, 0xdb, 0xca, 0xdc, 0xa8, 0xdd, 0x8c, 0xde, 0x44, 0xdf, 0x0c, 0xe9, 0x80 }; static const struct additional_sensor_data sensor_data[] = { { /* 0: OM6802 */ .n3 = {0x61, 0x68, 0x65, 0x0a, 0x60, 0x04}, .n4 = n4_om6802, .n4sz = sizeof n4_om6802, .reg80 = 0x3c, .reg8e = 0x33, .nset8 = {0xa8, 0xf0, 0xc6, 0x88, 0xc0, 0x00}, .data1 = {0xc2, 0x28, 0x0f, 0x22, 0xcd, 0x27, 0x2c, 0x06, 0xb3, 0xfc}, .data2 = {0x80, 0xff, 0xff, 0x80, 0xff, 0xff, 0x80, 0xff, 0xff}, .data3 = {0x80, 0xff, 0xff, 0x80, 0xff, 0xff, 0x80, 0xff, 0xff}, .data4 = /*Freq (50/60Hz). 
Splitted for test purpose */ {0x66, 0xca, 0xa8, 0xf0}, .data5 = /* this could be removed later */ {0x0c, 0x03, 0xab, 0x13, 0x81, 0x23}, .stream = {0x0b, 0x04, 0x0a, 0x78}, }, { /* 1: OTHER */ .n3 = {0x61, 0xc2, 0x65, 0x88, 0x60, 0x00}, .n4 = n4_other, .n4sz = sizeof n4_other, .reg80 = 0xac, .reg8e = 0xb8, .nset8 = {0xa8, 0xa8, 0xc6, 0xda, 0xc0, 0x00}, .data1 = {0xc1, 0x48, 0x04, 0x1b, 0xca, 0x2e, 0x33, 0x3a, 0xe8, 0xfc}, .data2 = {0x4e, 0x9c, 0xec, 0x40, 0x80, 0xc0, 0x48, 0x96, 0xd9}, .data3 = {0x4e, 0x9c, 0xec, 0x40, 0x80, 0xc0, 0x48, 0x96, 0xd9}, .data4 = {0x66, 0x00, 0xa8, 0xa8}, .data5 = {0x0c, 0x03, 0xab, 0x29, 0x81, 0x69}, .stream = {0x0b, 0x04, 0x0a, 0x00}, }, { /* 2: TAS5130A */ .n3 = {0x61, 0xc2, 0x65, 0x0d, 0x60, 0x08}, .n4 = n4_tas5130a, .n4sz = sizeof n4_tas5130a, .reg80 = 0x3c, .reg8e = 0xb4, .nset8 = {0xa8, 0xf0, 0xc6, 0xda, 0xc0, 0x00}, .data1 = {0xbb, 0x28, 0x10, 0x10, 0xbb, 0x28, 0x1e, 0x27, 0xc8, 0xfc}, .data2 = {0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0}, .data3 = {0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0, 0x60, 0xa8, 0xe0}, .data4 = /* Freq (50/60Hz). Splitted for test purpose */ {0x66, 0x00, 0xa8, 0xe8}, .data5 = {0x0c, 0x03, 0xab, 0x10, 0x81, 0x20}, .stream = {0x0b, 0x04, 0x0a, 0x40}, }, { /* 3: LT168G */ .n3 = {0x61, 0xc2, 0x65, 0x68, 0x60, 0x00}, .n4 = n4_lt168g, .n4sz = sizeof n4_lt168g, .reg80 = 0x7c, .reg8e = 0xb3, .nset8 = {0xa8, 0xf0, 0xc6, 0xba, 0xc0, 0x00}, .data1 = {0xc0, 0x38, 0x08, 0x10, 0xc0, 0x30, 0x10, 0x40, 0xb0, 0xf4}, .data2 = {0x40, 0x80, 0xc0, 0x50, 0xa0, 0xf0, 0x53, 0xa6, 0xff}, .data3 = {0x40, 0x80, 0xc0, 0x50, 0xa0, 0xf0, 0x53, 0xa6, 0xff}, .data4 = {0x66, 0x41, 0xa8, 0xf0}, .data5 = {0x0c, 0x03, 0xab, 0x4b, 0x81, 0x2b}, .stream = {0x0b, 0x04, 0x0a, 0x28}, }, }; #define MAX_EFFECTS 7 /* easily done by soft, this table could be removed, * i keep it here just in case */ static const u8 effects_table[MAX_EFFECTS][6] = { {0xa8, 0xe8, 0xc6, 0xd2, 0xc0, 0x00}, /* Normal */ {0xa8, 0xc8, 0xc6, 0x52, 0xc0, 0x04}, /* Repujar */ {0xa8, 0xe8, 0xc6, 0xd2, 0xc0, 0x20}, /* Monochrome */ {0xa8, 0xe8, 0xc6, 0xd2, 0xc0, 0x80}, /* Sepia */ {0xa8, 0xc8, 0xc6, 0x52, 0xc0, 0x02}, /* Croquis */ {0xa8, 0xc8, 0xc6, 0xd2, 0xc0, 0x10}, /* Sun Effect */ {0xa8, 0xc8, 0xc6, 0xd2, 0xc0, 0x40}, /* Negative */ }; static const u8 gamma_table[GAMMA_MAX][17] = { {0x00, 0x3e, 0x69, 0x85, 0x95, 0xa1, 0xae, 0xb9, /* 0 */ 0xc2, 0xcb, 0xd4, 0xdb, 0xe3, 0xea, 0xf1, 0xf8, 0xff}, {0x00, 0x33, 0x5a, 0x75, 0x85, 0x93, 0xa1, 0xad, /* 1 */ 0xb7, 0xc2, 0xcb, 0xd4, 0xde, 0xe7, 0xf0, 0xf7, 0xff}, {0x00, 0x2f, 0x51, 0x6b, 0x7c, 0x8a, 0x99, 0xa6, /* 2 */ 0xb1, 0xbc, 0xc6, 0xd0, 0xdb, 0xe4, 0xed, 0xf6, 0xff}, {0x00, 0x29, 0x48, 0x60, 0x72, 0x81, 0x90, 0x9e, /* 3 */ 0xaa, 0xb5, 0xbf, 0xcb, 0xd6, 0xe1, 0xeb, 0xf5, 0xff}, {0x00, 0x23, 0x3f, 0x55, 0x68, 0x77, 0x86, 0x95, /* 4 */ 0xa2, 0xad, 0xb9, 0xc6, 0xd2, 0xde, 0xe9, 0xf4, 0xff}, {0x00, 0x1b, 0x33, 0x48, 0x59, 0x69, 0x79, 0x87, /* 5 */ 0x96, 0xa3, 0xb1, 0xbe, 0xcc, 0xda, 0xe7, 0xf3, 0xff}, {0x00, 0x02, 0x10, 0x20, 0x32, 0x40, 0x57, 0x67, /* 6 */ 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}, {0x00, 0x02, 0x14, 0x26, 0x38, 0x4a, 0x60, 0x70, /* 7 */ 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xff}, {0x00, 0x10, 0x22, 0x35, 0x47, 0x5a, 0x69, 0x79, /* 8 */ 0x88, 0x97, 0xa7, 0xb6, 0xc4, 0xd3, 0xe0, 0xf0, 0xff}, {0x00, 0x10, 0x26, 0x40, 0x54, 0x65, 0x75, 0x84, /* 9 */ 0x93, 0xa1, 0xb0, 0xbd, 0xca, 0xd6, 0xe0, 0xf0, 0xff}, {0x00, 0x18, 0x2b, 0x44, 0x60, 0x70, 0x80, 0x8e, /* 10 */ 0x9c, 0xaa, 0xb7, 0xc4, 0xd0, 0xd8, 0xe2, 0xf0, 0xff}, 
{0x00, 0x1a, 0x34, 0x52, 0x66, 0x7e, 0x8d, 0x9b, /* 11 */ 0xa8, 0xb4, 0xc0, 0xcb, 0xd6, 0xe1, 0xeb, 0xf5, 0xff}, {0x00, 0x3f, 0x5a, 0x6e, 0x7f, 0x8e, 0x9c, 0xa8, /* 12 */ 0xb4, 0xbf, 0xc9, 0xd3, 0xdc, 0xe5, 0xee, 0xf6, 0xff}, {0x00, 0x54, 0x6f, 0x83, 0x93, 0xa0, 0xad, 0xb7, /* 13 */ 0xc2, 0xcb, 0xd4, 0xdc, 0xe4, 0xeb, 0xf2, 0xf9, 0xff}, {0x00, 0x6e, 0x88, 0x9a, 0xa8, 0xb3, 0xbd, 0xc6, /* 14 */ 0xcf, 0xd6, 0xdd, 0xe3, 0xe9, 0xef, 0xf4, 0xfa, 0xff}, {0x00, 0x93, 0xa8, 0xb7, 0xc1, 0xca, 0xd2, 0xd8, /* 15 */ 0xde, 0xe3, 0xe8, 0xed, 0xf1, 0xf5, 0xf8, 0xfc, 0xff} }; static const u8 tas5130a_sensor_init[][8] = { {0x62, 0x08, 0x63, 0x70, 0x64, 0x1d, 0x60, 0x09}, {0x62, 0x20, 0x63, 0x01, 0x64, 0x02, 0x60, 0x09}, {0x62, 0x07, 0x63, 0x03, 0x64, 0x00, 0x60, 0x09}, }; static u8 sensor_reset[] = {0x61, 0x68, 0x62, 0xff, 0x60, 0x07}; /* read 1 byte */ static u8 reg_r(struct gspca_dev *gspca_dev, u16 index) { usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), 0, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, /* value */ index, gspca_dev->usb_buf, 1, 500); return gspca_dev->usb_buf[0]; } static void reg_w(struct gspca_dev *gspca_dev, u16 index) { usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, NULL, 0, 500); } static void reg_w_buf(struct gspca_dev *gspca_dev, const u8 *buffer, u16 len) { if (len <= USB_BUF_SZ) { memcpy(gspca_dev->usb_buf, buffer, len); usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x01, 0, gspca_dev->usb_buf, len, 500); } else { u8 *tmpbuf; tmpbuf = kmalloc(len, GFP_KERNEL); if (!tmpbuf) { err("Out of memory"); return; } memcpy(tmpbuf, buffer, len); usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x01, 0, tmpbuf, len, 500); kfree(tmpbuf); } } /* write values to consecutive registers */ static void reg_w_ixbuf(struct gspca_dev *gspca_dev, u8 reg, const u8 *buffer, u16 len) { int i; u8 *p, *tmpbuf; if (len * 2 <= USB_BUF_SZ) { p = tmpbuf = gspca_dev->usb_buf; } else { p = tmpbuf = kmalloc(len * 2, GFP_KERNEL); if (!tmpbuf) { err("Out of memory"); return; } } i = len; while (--i >= 0) { *p++ = reg++; *p++ = *buffer++; } usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0x01, 0, tmpbuf, len * 2, 500); if (len * 2 > USB_BUF_SZ) kfree(tmpbuf); } /* Reported as OM6802*/ static void om6802_sensor_init(struct gspca_dev *gspca_dev) { int i; const u8 *p; u8 byte; u8 val[6] = {0x62, 0, 0x64, 0, 0x60, 0x05}; static const u8 sensor_init[] = { 0xdf, 0x6d, 0xdd, 0x18, 0x5a, 0xe0, 0x5c, 0x07, 0x5d, 0xb0, 0x5e, 0x1e, 0x60, 0x71, 0xef, 0x00, 0xe9, 0x00, 0xea, 0x00, 0x90, 0x24, 0x91, 0xb2, 0x82, 0x32, 0xfd, 0x41, 0x00 /* table end */ }; reg_w_buf(gspca_dev, sensor_reset, sizeof sensor_reset); msleep(100); i = 4; while (--i > 0) { byte = reg_r(gspca_dev, 0x0060); if (!(byte & 0x01)) break; msleep(100); } byte = reg_r(gspca_dev, 0x0063); if (byte != 0x17) { err("Bad sensor reset %02x", byte); /* continue? 
*/ } p = sensor_init; while (*p != 0) { val[1] = *p++; val[3] = *p++; if (*p == 0) reg_w(gspca_dev, 0x3c80); reg_w_buf(gspca_dev, val, sizeof val); i = 4; while (--i >= 0) { msleep(15); byte = reg_r(gspca_dev, 0x60); if (!(byte & 0x01)) break; } } msleep(15); reg_w(gspca_dev, 0x3c80); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; cam = &gspca_dev->cam; cam->cam_mode = vga_mode_t16; cam->nmodes = ARRAY_SIZE(vga_mode_t16); sd->brightness = BRIGHTNESS_DEF; sd->contrast = CONTRAST_DEF; sd->colors = COLORS_DEF; sd->gamma = GAMMA_DEF; sd->autogain = AUTOGAIN_DEF; sd->mirror = MIRROR_DEF; sd->freq = FREQ_DEF; sd->whitebalance = WHITE_BALANCE_DEF; sd->sharpness = SHARPNESS_DEF; sd->effect = EFFECTS_DEF; sd->red_balance = RED_BALANCE_DEF; sd->blue_balance = BLUE_BALANCE_DEF; sd->global_gain = global_gain_DEF; return 0; } static void setbrightness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; unsigned int brightness; u8 set6[4] = { 0x8f, 0x24, 0xc3, 0x00 }; brightness = sd->brightness; if (brightness < 7) { set6[1] = 0x26; set6[3] = 0x70 - brightness * 0x10; } else { set6[3] = 0x00 + ((brightness - 7) * 0x10); } reg_w_buf(gspca_dev, set6, sizeof set6); } static void setcontrast(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; unsigned int contrast = sd->contrast; u16 reg_to_write; if (contrast < 7) reg_to_write = 0x8ea9 - contrast * 0x200; else reg_to_write = 0x00a9 + (contrast - 7) * 0x200; reg_w(gspca_dev, reg_to_write); } static void setcolors(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 reg_to_write; reg_to_write = 0x80bb + sd->colors * 0x100; /* was 0xc0 */ reg_w(gspca_dev, reg_to_write); } static void setgamma(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; PDEBUG(D_CONF, "Gamma: %d", sd->gamma); reg_w_ixbuf(gspca_dev, 0x90, gamma_table[sd->gamma], sizeof gamma_table[0]); } static void setglobalgain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w(gspca_dev, (sd->red_balance << 8) + 0x87); reg_w(gspca_dev, (sd->blue_balance << 8) + 0x88); reg_w(gspca_dev, (sd->global_gain << 8) + 0x89); } /* Generic fnc for r/b balance, exposure and whitebalance */ static void setbalance(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; /* on whitebalance leave defaults values */ if (sd->whitebalance) { reg_w(gspca_dev, 0x3c80); } else { reg_w(gspca_dev, 0x3880); /* shoud we wait here.. 
*/ /* update and reset 'global gain' with webcam parameters */ sd->red_balance = reg_r(gspca_dev, 0x0087); sd->blue_balance = reg_r(gspca_dev, 0x0088); sd->global_gain = reg_r(gspca_dev, 0x0089); setglobalgain(gspca_dev); } } static void setwhitebalance(struct gspca_dev *gspca_dev) { setbalance(gspca_dev); } static void setsharpness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 reg_to_write; reg_to_write = 0x0aa6 + 0x1000 * sd->sharpness; reg_w(gspca_dev, reg_to_write); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { /* some of this registers are not really neded, because * they are overriden by setbrigthness, setcontrast, etc, * but wont hurt anyway, and can help someone with similar webcam * to see the initial parameters.*/ struct sd *sd = (struct sd *) gspca_dev; const struct additional_sensor_data *sensor; int i; u16 sensor_id; u8 test_byte = 0; static const u8 read_indexs[] = { 0x0a, 0x0b, 0x66, 0x80, 0x81, 0x8e, 0x8f, 0xa5, 0xa6, 0xa8, 0xbb, 0xbc, 0xc6, 0x00 }; static const u8 n1[] = {0x08, 0x03, 0x09, 0x03, 0x12, 0x04}; static const u8 n2[] = {0x08, 0x00}; sensor_id = (reg_r(gspca_dev, 0x06) << 8) | reg_r(gspca_dev, 0x07); switch (sensor_id & 0xff0f) { case 0x0801: PDEBUG(D_PROBE, "sensor tas5130a"); sd->sensor = SENSOR_TAS5130A; break; case 0x0802: PDEBUG(D_PROBE, "sensor lt168g"); sd->sensor = SENSOR_LT168G; break; case 0x0803: PDEBUG(D_PROBE, "sensor 'other'"); sd->sensor = SENSOR_OTHER; break; case 0x0807: PDEBUG(D_PROBE, "sensor om6802"); sd->sensor = SENSOR_OM6802; break; default: PDEBUG(D_ERR|D_PROBE, "unknown sensor %04x", sensor_id); return -EINVAL; } if (sd->sensor == SENSOR_OM6802) { reg_w_buf(gspca_dev, n1, sizeof n1); i = 5; while (--i >= 0) { reg_w_buf(gspca_dev, sensor_reset, sizeof sensor_reset); test_byte = reg_r(gspca_dev, 0x0063); msleep(100); if (test_byte == 0x17) break; /* OK */ } if (i < 0) { err("Bad sensor reset %02x", test_byte); return -EIO; } reg_w_buf(gspca_dev, n2, sizeof n2); } i = 0; while (read_indexs[i] != 0x00) { test_byte = reg_r(gspca_dev, read_indexs[i]); PDEBUG(D_STREAM, "Reg 0x%02x = 0x%02x", read_indexs[i], test_byte); i++; } sensor = &sensor_data[sd->sensor]; reg_w_buf(gspca_dev, sensor->n3, sizeof sensor->n3); reg_w_buf(gspca_dev, sensor->n4, sensor->n4sz); if (sd->sensor == SENSOR_LT168G) { test_byte = reg_r(gspca_dev, 0x80); PDEBUG(D_STREAM, "Reg 0x%02x = 0x%02x", 0x80, test_byte); reg_w(gspca_dev, 0x6c80); } reg_w_ixbuf(gspca_dev, 0xd0, sensor->data1, sizeof sensor->data1); reg_w_ixbuf(gspca_dev, 0xc7, sensor->data2, sizeof sensor->data2); reg_w_ixbuf(gspca_dev, 0xe0, sensor->data3, sizeof sensor->data3); reg_w(gspca_dev, (sensor->reg80 << 8) + 0x80); reg_w(gspca_dev, (sensor->reg80 << 8) + 0x80); reg_w(gspca_dev, (sensor->reg8e << 8) + 0x8e); setbrightness(gspca_dev); setcontrast(gspca_dev); setgamma(gspca_dev); setcolors(gspca_dev); setsharpness(gspca_dev); setwhitebalance(gspca_dev); reg_w(gspca_dev, 0x2087); /* tied to white balance? 
*/ reg_w(gspca_dev, 0x2088); reg_w(gspca_dev, 0x2089); reg_w_buf(gspca_dev, sensor->data4, sizeof sensor->data4); reg_w_buf(gspca_dev, sensor->data5, sizeof sensor->data5); reg_w_buf(gspca_dev, sensor->nset8, sizeof sensor->nset8); reg_w_buf(gspca_dev, sensor->stream, sizeof sensor->stream); if (sd->sensor == SENSOR_LT168G) { test_byte = reg_r(gspca_dev, 0x80); PDEBUG(D_STREAM, "Reg 0x%02x = 0x%02x", 0x80, test_byte); reg_w(gspca_dev, 0x6c80); } reg_w_ixbuf(gspca_dev, 0xd0, sensor->data1, sizeof sensor->data1); reg_w_ixbuf(gspca_dev, 0xc7, sensor->data2, sizeof sensor->data2); reg_w_ixbuf(gspca_dev, 0xe0, sensor->data3, sizeof sensor->data3); return 0; } static void setflip(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 flipcmd[8] = {0x62, 0x07, 0x63, 0x03, 0x64, 0x00, 0x60, 0x09}; if (sd->mirror) flipcmd[3] = 0x01; reg_w_buf(gspca_dev, flipcmd, sizeof flipcmd); } static void seteffect(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w_buf(gspca_dev, effects_table[sd->effect], sizeof effects_table[0]); if (sd->effect == 1 || sd->effect == 5) { PDEBUG(D_CONF, "This effect have been disabled for webcam \"safety\""); return; } if (sd->effect == 1 || sd->effect == 4) reg_w(gspca_dev, 0x4aa6); else reg_w(gspca_dev, 0xfaa6); } static void setlightfreq(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 freq[4] = { 0x66, 0x40, 0xa8, 0xe8 }; if (sd->freq == 2) /* 60hz */ freq[1] = 0x00; reg_w_buf(gspca_dev, freq, sizeof freq); } /* Is this really needed? * i added some module parameters for test with some users */ static void poll_sensor(struct gspca_dev *gspca_dev) { static const u8 poll1[] = {0x67, 0x05, 0x68, 0x81, 0x69, 0x80, 0x6a, 0x82, 0x6b, 0x68, 0x6c, 0x69, 0x72, 0xd9, 0x73, 0x34, 0x74, 0x32, 0x75, 0x92, 0x76, 0x00, 0x09, 0x01, 0x60, 0x14}; static const u8 poll2[] = {0x67, 0x02, 0x68, 0x71, 0x69, 0x72, 0x72, 0xa9, 0x73, 0x02, 0x73, 0x02, 0x60, 0x14}; static const u8 poll3[] = {0x87, 0x3f, 0x88, 0x20, 0x89, 0x2d}; static const u8 poll4[] = {0xa6, 0x0a, 0xea, 0xcf, 0xbe, 0x26, 0xb1, 0x5f, 0xa1, 0xb1, 0xda, 0x6b, 0xdb, 0x98, 0xdf, 0x0c, 0xc2, 0x80, 0xc3, 0x10}; PDEBUG(D_STREAM, "[Sensor requires polling]"); reg_w_buf(gspca_dev, poll1, sizeof poll1); reg_w_buf(gspca_dev, poll2, sizeof poll2); reg_w_buf(gspca_dev, poll3, sizeof poll3); reg_w_buf(gspca_dev, poll4, sizeof poll4); } static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; const struct additional_sensor_data *sensor; int i, mode; u8 t2[] = { 0x07, 0x00, 0x0d, 0x60, 0x0e, 0x80 }; static const u8 t3[] = { 0x07, 0x00, 0x88, 0x02, 0x06, 0x00, 0xe7, 0x01 }; mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; switch (mode) { case 0: /* 640x480 (0x00) */ break; case 1: /* 352x288 */ t2[1] = 0x40; break; case 2: /* 320x240 */ t2[1] = 0x10; break; case 3: /* 176x144 */ t2[1] = 0x50; break; default: /* case 4: * 160x120 */ t2[1] = 0x20; break; } switch (sd->sensor) { case SENSOR_OM6802: om6802_sensor_init(gspca_dev); break; case SENSOR_LT168G: break; case SENSOR_OTHER: break; default: /* case SENSOR_TAS5130A: */ i = 0; for (;;) { reg_w_buf(gspca_dev, tas5130a_sensor_init[i], sizeof tas5130a_sensor_init[0]); if (i >= ARRAY_SIZE(tas5130a_sensor_init) - 1) break; i++; } reg_w(gspca_dev, 0x3c80); /* just in case and to keep sync with logs (for mine) */ reg_w_buf(gspca_dev, tas5130a_sensor_init[i], sizeof tas5130a_sensor_init[0]); reg_w(gspca_dev, 0x3c80); break; } sensor = &sensor_data[sd->sensor]; reg_w_buf(gspca_dev, 
sensor->data4, sizeof sensor->data4); reg_r(gspca_dev, 0x0012); reg_w_buf(gspca_dev, t2, sizeof t2); reg_w_ixbuf(gspca_dev, 0xb3, t3, sizeof t3); reg_w(gspca_dev, 0x0013); msleep(15); reg_w_buf(gspca_dev, sensor->stream, sizeof sensor->stream); reg_w_buf(gspca_dev, sensor->stream, sizeof sensor->stream); if (sd->sensor == SENSOR_OM6802) poll_sensor(gspca_dev); return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w_buf(gspca_dev, sensor_data[sd->sensor].stream, sizeof sensor_data[sd->sensor].stream); reg_w_buf(gspca_dev, sensor_data[sd->sensor].stream, sizeof sensor_data[sd->sensor].stream); if (sd->sensor == SENSOR_OM6802) { msleep(20); reg_w(gspca_dev, 0x0309); } } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { static u8 ffd9[] = { 0xff, 0xd9 }; if (data[0] == 0x5a) { /* Control Packet, after this came the header again, * but extra bytes came in the packet before this, * sometimes an EOF arrives, sometimes not... */ return; } data += 2; len -= 2; if (data[0] == 0xff && data[1] == 0xd8) { /* extra bytes....., could be processed too but would be * a waste of time, right now leave the application and * libjpeg do it for ourserlves.. */ gspca_frame_add(gspca_dev, LAST_PACKET, ffd9, 2); gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); return; } if (data[len - 2] == 0xff && data[len - 1] == 0xd9) { /* Just in case, i have seen packets with the marker, * other's do not include it... */ len -= 2; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->blue_balance = val; if (gspca_dev->streaming) reg_w(gspca_dev, (val << 8) + 0x88); return 0; } static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->blue_balance; return 0; } static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->red_balance = val; if (gspca_dev->streaming) reg_w(gspca_dev, (val << 8) + 0x87); return 0; } static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->red_balance; return 0; } static int sd_setglobal_gain(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->global_gain = val; if (gspca_dev->streaming) setglobalgain(gspca_dev); return 0; } static int sd_getglobal_gain(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->global_gain; return 0; } static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->brightness = val; if (gspca_dev->streaming) setbrightness(gspca_dev); return 0; } static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->brightness; return *val; } static int sd_setwhitebalance(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->whitebalance = val; if (gspca_dev->streaming) setwhitebalance(gspca_dev); return 0; } static int sd_getwhitebalance(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->whitebalance; return *val; } static int sd_setflip(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->mirror = val; if (gspca_dev->streaming) setflip(gspca_dev); return 0; } 
static int sd_getflip(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->mirror; return *val; } static int sd_seteffect(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->effect = val; if (gspca_dev->streaming) seteffect(gspca_dev); return 0; } static int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->effect; return *val; } static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->contrast = val; if (gspca_dev->streaming) setcontrast(gspca_dev); return 0; } static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->contrast; return *val; } static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->colors = val; if (gspca_dev->streaming) setcolors(gspca_dev); return 0; } static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->colors; return 0; } static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->gamma = val; if (gspca_dev->streaming) setgamma(gspca_dev); return 0; } static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->gamma; return 0; } static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->freq = val; if (gspca_dev->streaming) setlightfreq(gspca_dev); return 0; } static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->freq; return 0; } static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->sharpness = val; if (gspca_dev->streaming) setsharpness(gspca_dev); return 0; } static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->sharpness; return 0; } /* Low Light set here......*/ static int sd_setlowlight(struct gspca_dev *gspca_dev, __s32 val) { struct sd *sd = (struct sd *) gspca_dev; sd->autogain = val; if (val != 0) reg_w(gspca_dev, 0xf48e); else reg_w(gspca_dev, 0xb48e); return 0; } static int sd_getlowlight(struct gspca_dev *gspca_dev, __s32 *val) { struct sd *sd = (struct sd *) gspca_dev; *val = sd->autogain; return 0; } static int sd_querymenu(struct gspca_dev *gspca_dev, struct v4l2_querymenu *menu) { switch (menu->id) { case V4L2_CID_POWER_LINE_FREQUENCY: switch (menu->index) { case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ strcpy((char *) menu->name, "50 Hz"); return 0; case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ strcpy((char *) menu->name, "60 Hz"); return 0; } break; case V4L2_CID_EFFECTS: if ((unsigned) menu->index < ARRAY_SIZE(effects_control)) { strncpy((char *) menu->name, effects_control[menu->index], 32); return 0; } break; } return -EINVAL; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = ARRAY_SIZE(sd_ctrls), .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .querymenu = sd_querymenu, }; /* -- module initialisation -- */ static const __devinitdata struct usb_device_id device_table[] = { {USB_DEVICE(0x17a1, 0x0128)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct 
usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; /* -- module insert / remove -- */ static int __init sd_mod_init(void) { int ret; ret = usb_register(&sd_driver); if (ret < 0) return ret; PDEBUG(D_PROBE, "registered"); return 0; } static void __exit sd_mod_exit(void) { usb_deregister(&sd_driver); PDEBUG(D_PROBE, "deregistered"); } module_init(sd_mod_init); module_exit(sd_mod_exit);
gpl-2.0
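reg_w_ixbuf() in t613.c above writes N values to consecutive registers by packing an interleaved reg, val[0], reg+1, val[1], ... buffer and sending it in one vendor control transfer; setgamma() uses it to load a 17-entry gamma row starting at register 0x90. A minimal userspace sketch of just the packing step (pack_ixbuf is a made-up name, the USB transfer is omitted, and the sample values are simply the first four entries of gamma_table[0]):

#include <stddef.h>
#include <stdio.h>

/* Interleave a starting register index with a run of values, the way
 * reg_w_ixbuf() prepares its buffer before the vendor control request. */
static int pack_ixbuf(unsigned char *out, size_t outlen,
		      unsigned char reg, const unsigned char *val, size_t n)
{
	size_t i;

	if (outlen < n * 2)
		return -1;			/* caller's buffer too small */
	for (i = 0; i < n; i++) {
		out[2 * i]     = (unsigned char)(reg + i);
		out[2 * i + 1] = val[i];
	}
	return (int)(n * 2);			/* byte count for the transfer */
}

int main(void)
{
	const unsigned char gamma_row[4] = { 0x00, 0x3e, 0x69, 0x85 };
	unsigned char buf[8];
	int len = pack_ixbuf(buf, sizeof(buf), 0x90, gamma_row, 4);
	int i;

	for (i = 0; i < len; i++)
		printf("%02x ", buf[i]);	/* 90 00 91 3e 92 69 93 85 */
	printf("\n");
	return 0;
}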
Sombionix/zissou_kernel
drivers/media/video/saa7134/saa7134-core.c
760
36709
/* * * device driver for philips saa7134 based TV cards * driver core * * (c) 2001-03 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs] * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/sound.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/dma-mapping.h> #include <linux/pm.h> #include "saa7134-reg.h" #include "saa7134.h" MODULE_DESCRIPTION("v4l2 driver module for saa7130/34 based TV cards"); MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]"); MODULE_LICENSE("GPL"); /* ------------------------------------------------------------------ */ static unsigned int irq_debug; module_param(irq_debug, int, 0644); MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]"); static unsigned int core_debug; module_param(core_debug, int, 0644); MODULE_PARM_DESC(core_debug,"enable debug messages [core]"); static unsigned int gpio_tracking; module_param(gpio_tracking, int, 0644); MODULE_PARM_DESC(gpio_tracking,"enable debug messages [gpio]"); static unsigned int alsa = 1; module_param(alsa, int, 0644); MODULE_PARM_DESC(alsa,"enable/disable ALSA DMA sound [dmasound]"); static unsigned int latency = UNSET; module_param(latency, int, 0444); MODULE_PARM_DESC(latency,"pci latency timer"); int saa7134_no_overlay=-1; module_param_named(no_overlay, saa7134_no_overlay, int, 0444); MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)" " [some VIA/SIS chipsets are known to have problem with overlay]"); static unsigned int video_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET }; static unsigned int vbi_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET }; static unsigned int radio_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET }; static unsigned int tuner[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET }; static unsigned int card[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET }; module_param_array(video_nr, int, NULL, 0444); module_param_array(vbi_nr, int, NULL, 0444); module_param_array(radio_nr, int, NULL, 0444); module_param_array(tuner, int, NULL, 0444); module_param_array(card, int, NULL, 0444); MODULE_PARM_DESC(video_nr, "video device number"); MODULE_PARM_DESC(vbi_nr, "vbi device number"); MODULE_PARM_DESC(radio_nr, "radio device number"); MODULE_PARM_DESC(tuner, "tuner type"); MODULE_PARM_DESC(card, "card type"); DEFINE_MUTEX(saa7134_devlist_lock); EXPORT_SYMBOL(saa7134_devlist_lock); LIST_HEAD(saa7134_devlist); EXPORT_SYMBOL(saa7134_devlist); static LIST_HEAD(mops_list); static unsigned int saa7134_devcount; int (*saa7134_dmasound_init)(struct saa7134_dev *dev); int (*saa7134_dmasound_exit)(struct saa7134_dev *dev); #define dprintk(fmt, arg...) 
if (core_debug) \ printk(KERN_DEBUG "%s/core: " fmt, dev->name , ## arg) void saa7134_track_gpio(struct saa7134_dev *dev, char *msg) { unsigned long mode,status; if (!gpio_tracking) return; /* rising SAA7134_GPIO_GPRESCAN reads the status */ saa_andorb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN,0); saa_andorb(SAA7134_GPIO_GPMODE3,SAA7134_GPIO_GPRESCAN,SAA7134_GPIO_GPRESCAN); mode = saa_readl(SAA7134_GPIO_GPMODE0 >> 2) & 0xfffffff; status = saa_readl(SAA7134_GPIO_GPSTATUS0 >> 2) & 0xfffffff; printk(KERN_DEBUG "%s: gpio: mode=0x%07lx in=0x%07lx out=0x%07lx [%s]\n", dev->name, mode, (~mode) & status, mode & status, msg); } void saa7134_set_gpio(struct saa7134_dev *dev, int bit_no, int value) { u32 index, bitval; index = 1 << bit_no; switch (value) { case 0: /* static value */ case 1: dprintk("setting GPIO%d to static %d\n", bit_no, value); /* turn sync mode off if necessary */ if (index & 0x00c00000) saa_andorb(SAA7134_VIDEO_PORT_CTRL6, 0x0f, 0x00); if (value) bitval = index; else bitval = 0; saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, index, index); saa_andorl(SAA7134_GPIO_GPSTATUS0 >> 2, index, bitval); break; case 3: /* tristate */ dprintk("setting GPIO%d to tristate\n", bit_no); saa_andorl(SAA7134_GPIO_GPMODE0 >> 2, index, 0); break; } } /* ------------------------------------------------------------------ */ /* ----------------------------------------------------------- */ /* delayed request_module */ #if defined(CONFIG_MODULES) && defined(MODULE) static void request_module_async(struct work_struct *work){ struct saa7134_dev* dev = container_of(work, struct saa7134_dev, request_module_wk); if (card_is_empress(dev)) request_module("saa7134-empress"); if (card_is_dvb(dev)) request_module("saa7134-dvb"); if (alsa) { if (dev->pci->device != PCI_DEVICE_ID_PHILIPS_SAA7130) request_module("saa7134-alsa"); } } static void request_submodules(struct saa7134_dev *dev) { INIT_WORK(&dev->request_module_wk, request_module_async); schedule_work(&dev->request_module_wk); } #else #define request_submodules(dev) #endif /* CONFIG_MODULES */ /* ------------------------------------------------------------------ */ /* nr of (saa7134-)pages for the given buffer size */ static int saa7134_buffer_pages(int size) { size = PAGE_ALIGN(size); size += PAGE_SIZE; /* for non-page-aligned buffers */ size /= 4096; return size; } /* calc max # of buffers from size (must not exceed the 4MB virtual * address space per DMA channel) */ int saa7134_buffer_count(unsigned int size, unsigned int count) { unsigned int maxcount; maxcount = 1024 / saa7134_buffer_pages(size); if (count > maxcount) count = maxcount; return count; } int saa7134_buffer_startpage(struct saa7134_buf *buf) { return saa7134_buffer_pages(buf->vb.bsize) * buf->vb.i; } unsigned long saa7134_buffer_base(struct saa7134_buf *buf) { unsigned long base; struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); base = saa7134_buffer_startpage(buf) * 4096; base += dma->sglist[0].offset; return base; } /* ------------------------------------------------------------------ */ int saa7134_pgtable_alloc(struct pci_dev *pci, struct saa7134_pgtable *pt) { __le32 *cpu; dma_addr_t dma_addr = 0; cpu = pci_alloc_consistent(pci, SAA7134_PGTABLE_SIZE, &dma_addr); if (NULL == cpu) return -ENOMEM; pt->size = SAA7134_PGTABLE_SIZE; pt->cpu = cpu; pt->dma = dma_addr; return 0; } int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt, struct scatterlist *list, unsigned int length, unsigned int startpage) { __le32 *ptr; unsigned int i,p; BUG_ON(NULL == pt || NULL == pt->cpu); 
ptr = pt->cpu + startpage; for (i = 0; i < length; i++, list++) for (p = 0; p * 4096 < list->length; p++, ptr++) *ptr = cpu_to_le32(sg_dma_address(list) - list->offset); return 0; } void saa7134_pgtable_free(struct pci_dev *pci, struct saa7134_pgtable *pt) { if (NULL == pt->cpu) return; pci_free_consistent(pci, pt->size, pt->cpu, pt->dma); pt->cpu = NULL; } /* ------------------------------------------------------------------ */ void saa7134_dma_free(struct videobuf_queue *q,struct saa7134_buf *buf) { struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); BUG_ON(in_interrupt()); videobuf_waiton(&buf->vb,0,0); videobuf_dma_unmap(q, dma); videobuf_dma_free(dma); buf->vb.state = VIDEOBUF_NEEDS_INIT; } /* ------------------------------------------------------------------ */ int saa7134_buffer_queue(struct saa7134_dev *dev, struct saa7134_dmaqueue *q, struct saa7134_buf *buf) { struct saa7134_buf *next = NULL; assert_spin_locked(&dev->slock); dprintk("buffer_queue %p\n",buf); if (NULL == q->curr) { if (!q->need_two) { q->curr = buf; buf->activate(dev,buf,NULL); } else if (list_empty(&q->queue)) { list_add_tail(&buf->vb.queue,&q->queue); buf->vb.state = VIDEOBUF_QUEUED; } else { next = list_entry(q->queue.next,struct saa7134_buf, vb.queue); q->curr = buf; buf->activate(dev,buf,next); } } else { list_add_tail(&buf->vb.queue,&q->queue); buf->vb.state = VIDEOBUF_QUEUED; } return 0; } void saa7134_buffer_finish(struct saa7134_dev *dev, struct saa7134_dmaqueue *q, unsigned int state) { assert_spin_locked(&dev->slock); dprintk("buffer_finish %p\n",q->curr); /* finish current buffer */ q->curr->vb.state = state; do_gettimeofday(&q->curr->vb.ts); wake_up(&q->curr->vb.done); q->curr = NULL; } void saa7134_buffer_next(struct saa7134_dev *dev, struct saa7134_dmaqueue *q) { struct saa7134_buf *buf,*next = NULL; assert_spin_locked(&dev->slock); BUG_ON(NULL != q->curr); if (!list_empty(&q->queue)) { /* activate next one from queue */ buf = list_entry(q->queue.next,struct saa7134_buf,vb.queue); dprintk("buffer_next %p [prev=%p/next=%p]\n", buf,q->queue.prev,q->queue.next); list_del(&buf->vb.queue); if (!list_empty(&q->queue)) next = list_entry(q->queue.next,struct saa7134_buf, vb.queue); q->curr = buf; buf->activate(dev,buf,next); dprintk("buffer_next #2 prev=%p/next=%p\n", q->queue.prev,q->queue.next); } else { /* nothing to do -- just stop DMA */ dprintk("buffer_next %p\n",NULL); saa7134_set_dmabits(dev); del_timer(&q->timeout); if (card_has_mpeg(dev)) if (dev->ts_started) saa7134_ts_stop(dev); } } void saa7134_buffer_timeout(unsigned long data) { struct saa7134_dmaqueue *q = (struct saa7134_dmaqueue*)data; struct saa7134_dev *dev = q->dev; unsigned long flags; spin_lock_irqsave(&dev->slock,flags); /* try to reset the hardware (SWRST) */ saa_writeb(SAA7134_REGION_ENABLE, 0x00); saa_writeb(SAA7134_REGION_ENABLE, 0x80); saa_writeb(SAA7134_REGION_ENABLE, 0x00); /* flag current buffer as failed, try to start over with the next one. 
*/ if (q->curr) { dprintk("timeout on %p\n",q->curr); saa7134_buffer_finish(dev,q,VIDEOBUF_ERROR); } saa7134_buffer_next(dev,q); spin_unlock_irqrestore(&dev->slock,flags); } /* ------------------------------------------------------------------ */ int saa7134_set_dmabits(struct saa7134_dev *dev) { u32 split, task=0, ctrl=0, irq=0; enum v4l2_field cap = V4L2_FIELD_ANY; enum v4l2_field ov = V4L2_FIELD_ANY; assert_spin_locked(&dev->slock); if (dev->insuspend) return 0; /* video capture -- dma 0 + video task A */ if (dev->video_q.curr) { task |= 0x01; ctrl |= SAA7134_MAIN_CTRL_TE0; irq |= SAA7134_IRQ1_INTE_RA0_1 | SAA7134_IRQ1_INTE_RA0_0; cap = dev->video_q.curr->vb.field; } /* video capture -- dma 1+2 (planar modes) */ if (dev->video_q.curr && dev->video_q.curr->fmt->planar) { ctrl |= SAA7134_MAIN_CTRL_TE4 | SAA7134_MAIN_CTRL_TE5; } /* screen overlay -- dma 0 + video task B */ if (dev->ovenable) { task |= 0x10; ctrl |= SAA7134_MAIN_CTRL_TE1; ov = dev->ovfield; } /* vbi capture -- dma 0 + vbi task A+B */ if (dev->vbi_q.curr) { task |= 0x22; ctrl |= SAA7134_MAIN_CTRL_TE2 | SAA7134_MAIN_CTRL_TE3; irq |= SAA7134_IRQ1_INTE_RA0_7 | SAA7134_IRQ1_INTE_RA0_6 | SAA7134_IRQ1_INTE_RA0_5 | SAA7134_IRQ1_INTE_RA0_4; } /* audio capture -- dma 3 */ if (dev->dmasound.dma_running) { ctrl |= SAA7134_MAIN_CTRL_TE6; irq |= SAA7134_IRQ1_INTE_RA3_1 | SAA7134_IRQ1_INTE_RA3_0; } /* TS capture -- dma 5 */ if (dev->ts_q.curr) { ctrl |= SAA7134_MAIN_CTRL_TE5; irq |= SAA7134_IRQ1_INTE_RA2_1 | SAA7134_IRQ1_INTE_RA2_0; } /* set task conditions + field handling */ if (V4L2_FIELD_HAS_BOTH(cap) || V4L2_FIELD_HAS_BOTH(ov) || cap == ov) { /* default config -- use full frames */ saa_writeb(SAA7134_TASK_CONDITIONS(TASK_A), 0x0d); saa_writeb(SAA7134_TASK_CONDITIONS(TASK_B), 0x0d); saa_writeb(SAA7134_FIELD_HANDLING(TASK_A), 0x02); saa_writeb(SAA7134_FIELD_HANDLING(TASK_B), 0x02); split = 0; } else { /* split fields between tasks */ if (V4L2_FIELD_TOP == cap) { /* odd A, even B, repeat */ saa_writeb(SAA7134_TASK_CONDITIONS(TASK_A), 0x0d); saa_writeb(SAA7134_TASK_CONDITIONS(TASK_B), 0x0e); } else { /* odd B, even A, repeat */ saa_writeb(SAA7134_TASK_CONDITIONS(TASK_A), 0x0e); saa_writeb(SAA7134_TASK_CONDITIONS(TASK_B), 0x0d); } saa_writeb(SAA7134_FIELD_HANDLING(TASK_A), 0x01); saa_writeb(SAA7134_FIELD_HANDLING(TASK_B), 0x01); split = 1; } /* irqs */ saa_writeb(SAA7134_REGION_ENABLE, task); saa_writel(SAA7134_IRQ1, irq); saa_andorl(SAA7134_MAIN_CTRL, SAA7134_MAIN_CTRL_TE0 | SAA7134_MAIN_CTRL_TE1 | SAA7134_MAIN_CTRL_TE2 | SAA7134_MAIN_CTRL_TE3 | SAA7134_MAIN_CTRL_TE4 | SAA7134_MAIN_CTRL_TE5 | SAA7134_MAIN_CTRL_TE6, ctrl); dprintk("dmabits: task=0x%02x ctrl=0x%02x irq=0x%x split=%s\n", task, ctrl, irq, split ? "no" : "yes"); return 0; } /* ------------------------------------------------------------------ */ /* IRQ handler + helpers */ static char *irqbits[] = { "DONE_RA0", "DONE_RA1", "DONE_RA2", "DONE_RA3", "AR", "PE", "PWR_ON", "RDCAP", "INTL", "FIDT", "MMC", "TRIG_ERR", "CONF_ERR", "LOAD_ERR", "GPIO16", "GPIO18", "GPIO22", "GPIO23" }; #define IRQBITS ARRAY_SIZE(irqbits) static void print_irqstatus(struct saa7134_dev *dev, int loop, unsigned long report, unsigned long status) { unsigned int i; printk(KERN_DEBUG "%s/irq[%d,%ld]: r=0x%lx s=0x%02lx", dev->name,loop,jiffies,report,status); for (i = 0; i < IRQBITS; i++) { if (!(report & (1 << i))) continue; printk(" %s",irqbits[i]); } if (report & SAA7134_IRQ_REPORT_DONE_RA0) { printk(" | RA0=%s,%s,%s,%ld", (status & 0x40) ? "vbi" : "video", (status & 0x20) ? "b" : "a", (status & 0x10) ? 
"odd" : "even", (status & 0x0f)); } printk("\n"); } static irqreturn_t saa7134_irq(int irq, void *dev_id) { struct saa7134_dev *dev = (struct saa7134_dev*) dev_id; unsigned long report,status; int loop, handled = 0; if (dev->insuspend) goto out; for (loop = 0; loop < 10; loop++) { report = saa_readl(SAA7134_IRQ_REPORT); status = saa_readl(SAA7134_IRQ_STATUS); /* If dmasound support is active and we get a sound report, * mask out the report and let the saa7134-alsa module deal * with it */ if ((report & SAA7134_IRQ_REPORT_DONE_RA3) && (dev->dmasound.priv_data != NULL) ) { if (irq_debug > 1) printk(KERN_DEBUG "%s/irq: preserving DMA sound interrupt\n", dev->name); report &= ~SAA7134_IRQ_REPORT_DONE_RA3; } if (0 == report) { if (irq_debug > 1) printk(KERN_DEBUG "%s/irq: no (more) work\n", dev->name); goto out; } handled = 1; saa_writel(SAA7134_IRQ_REPORT,report); if (irq_debug) print_irqstatus(dev,loop,report,status); if ((report & SAA7134_IRQ_REPORT_RDCAP) || (report & SAA7134_IRQ_REPORT_INTL)) saa7134_irq_video_signalchange(dev); if ((report & SAA7134_IRQ_REPORT_DONE_RA0) && (status & 0x60) == 0) saa7134_irq_video_done(dev,status); if ((report & SAA7134_IRQ_REPORT_DONE_RA0) && (status & 0x40) == 0x40) saa7134_irq_vbi_done(dev,status); if ((report & SAA7134_IRQ_REPORT_DONE_RA2) && card_has_mpeg(dev)) saa7134_irq_ts_done(dev,status); if (report & SAA7134_IRQ_REPORT_GPIO16) { switch (dev->has_remote) { case SAA7134_REMOTE_GPIO: if (!dev->remote) break; if (dev->remote->mask_keydown & 0x10000) { saa7134_input_irq(dev); } break; case SAA7134_REMOTE_I2C: break; /* FIXME: invoke I2C get_key() */ default: /* GPIO16 not used by IR remote */ break; } } if (report & SAA7134_IRQ_REPORT_GPIO18) { switch (dev->has_remote) { case SAA7134_REMOTE_GPIO: if (!dev->remote) break; if ((dev->remote->mask_keydown & 0x40000) || (dev->remote->mask_keyup & 0x40000)) { saa7134_input_irq(dev); } break; case SAA7134_REMOTE_I2C: break; /* FIXME: invoke I2C get_key() */ default: /* GPIO18 not used by IR remote */ break; } } } if (10 == loop) { print_irqstatus(dev,loop,report,status); if (report & SAA7134_IRQ_REPORT_PE) { /* disable all parity error */ printk(KERN_WARNING "%s/irq: looping -- " "clearing PE (parity error!) 
enable bit\n",dev->name); saa_clearl(SAA7134_IRQ2,SAA7134_IRQ2_INTE_PE); } else if (report & SAA7134_IRQ_REPORT_GPIO16) { /* disable gpio16 IRQ */ printk(KERN_WARNING "%s/irq: looping -- " "clearing GPIO16 enable bit\n",dev->name); saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_P); saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_N); } else if (report & SAA7134_IRQ_REPORT_GPIO18) { /* disable gpio18 IRQs */ printk(KERN_WARNING "%s/irq: looping -- " "clearing GPIO18 enable bit\n",dev->name); saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P); saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_N); } else { /* disable all irqs */ printk(KERN_WARNING "%s/irq: looping -- " "clearing all enable bits\n",dev->name); saa_writel(SAA7134_IRQ1,0); saa_writel(SAA7134_IRQ2,0); } } out: return IRQ_RETVAL(handled); } /* ------------------------------------------------------------------ */ /* early init (no i2c, no irq) */ static int saa7134_hw_enable1(struct saa7134_dev *dev) { /* RAM FIFO config */ saa_writel(SAA7134_FIFO_SIZE, 0x08070503); saa_writel(SAA7134_THRESHOULD, 0x02020202); /* enable audio + video processing */ saa_writel(SAA7134_MAIN_CTRL, SAA7134_MAIN_CTRL_VPLLE | SAA7134_MAIN_CTRL_APLLE | SAA7134_MAIN_CTRL_EXOSC | SAA7134_MAIN_CTRL_EVFE1 | SAA7134_MAIN_CTRL_EVFE2 | SAA7134_MAIN_CTRL_ESFE | SAA7134_MAIN_CTRL_EBDAC); /* * Initialize OSS _after_ enabling audio clock PLL and audio processing. * OSS initialization writes to registers via the audio DSP; these * writes will fail unless the audio clock has been started. At worst, * audio will not work. */ /* enable peripheral devices */ saa_writeb(SAA7134_SPECIAL_MODE, 0x01); /* set vertical line numbering start (vbi needs this) */ saa_writeb(SAA7134_SOURCE_TIMING2, 0x20); return 0; } static int saa7134_hwinit1(struct saa7134_dev *dev) { dprintk("hwinit1\n"); saa_writel(SAA7134_IRQ1, 0); saa_writel(SAA7134_IRQ2, 0); /* Clear any stale IRQ reports */ saa_writel(SAA7134_IRQ_REPORT, saa_readl(SAA7134_IRQ_REPORT)); mutex_init(&dev->lock); spin_lock_init(&dev->slock); saa7134_track_gpio(dev,"pre-init"); saa7134_video_init1(dev); saa7134_vbi_init1(dev); if (card_has_mpeg(dev)) saa7134_ts_init1(dev); saa7134_input_init1(dev); saa7134_hw_enable1(dev); return 0; } /* late init (with i2c + irq) */ static int saa7134_hw_enable2(struct saa7134_dev *dev) { unsigned int irq2_mask; /* enable IRQ's */ irq2_mask = SAA7134_IRQ2_INTE_DEC3 | SAA7134_IRQ2_INTE_DEC2 | SAA7134_IRQ2_INTE_DEC1 | SAA7134_IRQ2_INTE_DEC0 | SAA7134_IRQ2_INTE_PE | SAA7134_IRQ2_INTE_AR; if (dev->has_remote == SAA7134_REMOTE_GPIO && dev->remote) { if (dev->remote->mask_keydown & 0x10000) irq2_mask |= SAA7134_IRQ2_INTE_GPIO16_N; else { /* Allow enabling both IRQ edge triggers */ if (dev->remote->mask_keydown & 0x40000) irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_P; if (dev->remote->mask_keyup & 0x40000) irq2_mask |= SAA7134_IRQ2_INTE_GPIO18_N; } } if (dev->has_remote == SAA7134_REMOTE_I2C) { request_module("ir-kbd-i2c"); } saa_writel(SAA7134_IRQ1, 0); saa_writel(SAA7134_IRQ2, irq2_mask); return 0; } static int saa7134_hwinit2(struct saa7134_dev *dev) { dprintk("hwinit2\n"); saa7134_video_init2(dev); saa7134_tvaudio_init2(dev); saa7134_hw_enable2(dev); return 0; } /* shutdown */ static int saa7134_hwfini(struct saa7134_dev *dev) { dprintk("hwfini\n"); if (card_has_mpeg(dev)) saa7134_ts_fini(dev); saa7134_input_fini(dev); saa7134_vbi_fini(dev); saa7134_tvaudio_fini(dev); return 0; } static void __devinit must_configure_manually(void) { unsigned int i,p; printk(KERN_WARNING "saa7134: <rant>\n" 
"saa7134: Congratulations! Your TV card vendor saved a few\n" "saa7134: cents for a eeprom, thus your pci board has no\n" "saa7134: subsystem ID and I can't identify it automatically\n" "saa7134: </rant>\n" "saa7134: I feel better now. Ok, here are the good news:\n" "saa7134: You can use the card=<nr> insmod option to specify\n" "saa7134: which board do you have. The list:\n"); for (i = 0; i < saa7134_bcount; i++) { printk(KERN_WARNING "saa7134: card=%d -> %-40.40s", i,saa7134_boards[i].name); for (p = 0; saa7134_pci_tbl[p].driver_data; p++) { if (saa7134_pci_tbl[p].driver_data != i) continue; printk(" %04x:%04x", saa7134_pci_tbl[p].subvendor, saa7134_pci_tbl[p].subdevice); } printk("\n"); } } static struct video_device *vdev_init(struct saa7134_dev *dev, struct video_device *template, char *type) { struct video_device *vfd; vfd = video_device_alloc(); if (NULL == vfd) return NULL; *vfd = *template; vfd->v4l2_dev = &dev->v4l2_dev; vfd->release = video_device_release; vfd->debug = video_debug; snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name, type, saa7134_boards[dev->board].name); video_set_drvdata(vfd, dev); return vfd; } static void saa7134_unregister_video(struct saa7134_dev *dev) { if (dev->video_dev) { if (video_is_registered(dev->video_dev)) video_unregister_device(dev->video_dev); else video_device_release(dev->video_dev); dev->video_dev = NULL; } if (dev->vbi_dev) { if (video_is_registered(dev->vbi_dev)) video_unregister_device(dev->vbi_dev); else video_device_release(dev->vbi_dev); dev->vbi_dev = NULL; } if (dev->radio_dev) { if (video_is_registered(dev->radio_dev)) video_unregister_device(dev->radio_dev); else video_device_release(dev->radio_dev); dev->radio_dev = NULL; } } static void mpeg_ops_attach(struct saa7134_mpeg_ops *ops, struct saa7134_dev *dev) { int err; if (NULL != dev->mops) return; if (saa7134_boards[dev->board].mpeg != ops->type) return; err = ops->init(dev); if (0 != err) return; dev->mops = ops; } static void mpeg_ops_detach(struct saa7134_mpeg_ops *ops, struct saa7134_dev *dev) { if (NULL == dev->mops) return; if (dev->mops != ops) return; dev->mops->fini(dev); dev->mops = NULL; } static int __devinit saa7134_initdev(struct pci_dev *pci_dev, const struct pci_device_id *pci_id) { struct saa7134_dev *dev; struct saa7134_mpeg_ops *mops; int err; if (saa7134_devcount == SAA7134_MAXBOARDS) return -ENOMEM; dev = kzalloc(sizeof(*dev),GFP_KERNEL); if (NULL == dev) return -ENOMEM; err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev); if (err) goto fail0; /* pci init */ dev->pci = pci_dev; if (pci_enable_device(pci_dev)) { err = -EIO; goto fail1; } dev->nr = saa7134_devcount; sprintf(dev->name,"saa%x[%d]",pci_dev->device,dev->nr); /* pci quirks */ if (pci_pci_problems) { if (pci_pci_problems & PCIPCI_TRITON) printk(KERN_INFO "%s: quirk: PCIPCI_TRITON\n", dev->name); if (pci_pci_problems & PCIPCI_NATOMA) printk(KERN_INFO "%s: quirk: PCIPCI_NATOMA\n", dev->name); if (pci_pci_problems & PCIPCI_VIAETBF) printk(KERN_INFO "%s: quirk: PCIPCI_VIAETBF\n", dev->name); if (pci_pci_problems & PCIPCI_VSFX) printk(KERN_INFO "%s: quirk: PCIPCI_VSFX\n",dev->name); #ifdef PCIPCI_ALIMAGIK if (pci_pci_problems & PCIPCI_ALIMAGIK) { printk(KERN_INFO "%s: quirk: PCIPCI_ALIMAGIK -- latency fixup\n", dev->name); latency = 0x0A; } #endif if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL)) { printk(KERN_INFO "%s: quirk: this driver and your " "chipset may not work together" " in overlay mode.\n",dev->name); if (!saa7134_no_overlay) { printk(KERN_INFO "%s: quirk: overlay " "mode 
will be disabled.\n", dev->name); saa7134_no_overlay = 1; } else { printk(KERN_INFO "%s: quirk: overlay " "mode will be forced. Use this" " option at your own risk.\n", dev->name); } } } if (UNSET != latency) { printk(KERN_INFO "%s: setting pci latency timer to %d\n", dev->name,latency); pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency); } /* print pci info */ pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev); pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat); printk(KERN_INFO "%s: found at %s, rev: %d, irq: %d, " "latency: %d, mmio: 0x%llx\n", dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); pci_set_master(pci_dev); if (!pci_dma_supported(pci_dev, DMA_BIT_MASK(32))) { printk("%s: Oops: no 32bit PCI DMA ???\n",dev->name); err = -EIO; goto fail1; } /* board config */ dev->board = pci_id->driver_data; if (card[dev->nr] >= 0 && card[dev->nr] < saa7134_bcount) dev->board = card[dev->nr]; if (SAA7134_BOARD_NOAUTO == dev->board) { must_configure_manually(); dev->board = SAA7134_BOARD_UNKNOWN; } dev->autodetected = card[dev->nr] != dev->board; dev->tuner_type = saa7134_boards[dev->board].tuner_type; dev->tuner_addr = saa7134_boards[dev->board].tuner_addr; dev->radio_type = saa7134_boards[dev->board].radio_type; dev->radio_addr = saa7134_boards[dev->board].radio_addr; dev->tda9887_conf = saa7134_boards[dev->board].tda9887_conf; if (UNSET != tuner[dev->nr]) dev->tuner_type = tuner[dev->nr]; printk(KERN_INFO "%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n", dev->name,pci_dev->subsystem_vendor, pci_dev->subsystem_device,saa7134_boards[dev->board].name, dev->board, dev->autodetected ? "autodetected" : "insmod option"); /* get mmio */ if (!request_mem_region(pci_resource_start(pci_dev,0), pci_resource_len(pci_dev,0), dev->name)) { err = -EBUSY; printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n", dev->name,(unsigned long long)pci_resource_start(pci_dev,0)); goto fail1; } dev->lmmio = ioremap(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0)); dev->bmmio = (__u8 __iomem *)dev->lmmio; if (NULL == dev->lmmio) { err = -EIO; printk(KERN_ERR "%s: can't ioremap() MMIO memory\n", dev->name); goto fail2; } /* initialize hardware #1 */ saa7134_board_init1(dev); saa7134_hwinit1(dev); /* get irq */ err = request_irq(pci_dev->irq, saa7134_irq, IRQF_SHARED | IRQF_DISABLED, dev->name, dev); if (err < 0) { printk(KERN_ERR "%s: can't get IRQ %d\n", dev->name,pci_dev->irq); goto fail3; } /* wait a bit, register i2c bus */ msleep(100); saa7134_i2c_register(dev); saa7134_board_init2(dev); saa7134_hwinit2(dev); /* load i2c helpers */ if (card_is_empress(dev)) { struct v4l2_subdev *sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "saa6752hs", "saa6752hs", saa7134_boards[dev->board].empress_addr, NULL); if (sd) sd->grp_id = GRP_EMPRESS; } if (saa7134_boards[dev->board].rds_addr) { struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "saa6588", "saa6588", 0, I2C_ADDRS(saa7134_boards[dev->board].rds_addr)); if (sd) { printk(KERN_INFO "%s: found RDS decoder\n", dev->name); dev->has_rds = 1; } } request_submodules(dev); v4l2_prio_init(&dev->prio); mutex_lock(&saa7134_devlist_lock); list_for_each_entry(mops, &mops_list, next) mpeg_ops_attach(mops, dev); list_add_tail(&dev->devlist, &saa7134_devlist); mutex_unlock(&saa7134_devlist_lock); /* check for signal */ saa7134_irq_video_signalchange(dev); if (TUNER_ABSENT != dev->tuner_type) saa_call_all(dev, core, s_power, 
0); /* register v4l devices */ if (saa7134_no_overlay > 0) printk(KERN_INFO "%s: Overlay support disabled.\n", dev->name); dev->video_dev = vdev_init(dev,&saa7134_video_template,"video"); err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER, video_nr[dev->nr]); if (err < 0) { printk(KERN_INFO "%s: can't register video device\n", dev->name); goto fail4; } printk(KERN_INFO "%s: registered device %s [v4l2]\n", dev->name, video_device_node_name(dev->video_dev)); dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi"); err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI, vbi_nr[dev->nr]); if (err < 0) goto fail4; printk(KERN_INFO "%s: registered device %s\n", dev->name, video_device_node_name(dev->vbi_dev)); if (card_has_radio(dev)) { dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio"); err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO, radio_nr[dev->nr]); if (err < 0) goto fail4; printk(KERN_INFO "%s: registered device %s\n", dev->name, video_device_node_name(dev->radio_dev)); } /* everything worked */ saa7134_devcount++; if (saa7134_dmasound_init && !dev->dmasound.priv_data) saa7134_dmasound_init(dev); return 0; fail4: saa7134_unregister_video(dev); saa7134_i2c_unregister(dev); free_irq(pci_dev->irq, dev); fail3: saa7134_hwfini(dev); iounmap(dev->lmmio); fail2: release_mem_region(pci_resource_start(pci_dev,0), pci_resource_len(pci_dev,0)); fail1: v4l2_device_unregister(&dev->v4l2_dev); fail0: kfree(dev); return err; } static void __devexit saa7134_finidev(struct pci_dev *pci_dev) { struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev); struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev); struct saa7134_mpeg_ops *mops; /* Release DMA sound modules if present */ if (saa7134_dmasound_exit && dev->dmasound.priv_data) { saa7134_dmasound_exit(dev); } /* debugging ... */ if (irq_debug) { u32 report = saa_readl(SAA7134_IRQ_REPORT); u32 status = saa_readl(SAA7134_IRQ_STATUS); print_irqstatus(dev,42,report,status); } /* disable peripheral devices */ saa_writeb(SAA7134_SPECIAL_MODE,0); /* shutdown hardware */ saa_writel(SAA7134_IRQ1,0); saa_writel(SAA7134_IRQ2,0); saa_writel(SAA7134_MAIN_CTRL,0); /* shutdown subsystems */ saa7134_hwfini(dev); /* unregister */ mutex_lock(&saa7134_devlist_lock); list_del(&dev->devlist); list_for_each_entry(mops, &mops_list, next) mpeg_ops_detach(mops, dev); mutex_unlock(&saa7134_devlist_lock); saa7134_devcount--; saa7134_i2c_unregister(dev); saa7134_unregister_video(dev); /* the DMA sound modules should be unloaded before reaching this, but just in case they are still present... 
*/ if (dev->dmasound.priv_data != NULL) { free_irq(pci_dev->irq, &dev->dmasound); dev->dmasound.priv_data = NULL; } /* release resources */ free_irq(pci_dev->irq, dev); iounmap(dev->lmmio); release_mem_region(pci_resource_start(pci_dev,0), pci_resource_len(pci_dev,0)); v4l2_device_unregister(&dev->v4l2_dev); /* free memory */ kfree(dev); } #ifdef CONFIG_PM /* resends a current buffer in queue after resume */ static int saa7134_buffer_requeue(struct saa7134_dev *dev, struct saa7134_dmaqueue *q) { struct saa7134_buf *buf, *next; assert_spin_locked(&dev->slock); buf = q->curr; next = buf; dprintk("buffer_requeue\n"); if (!buf) return 0; dprintk("buffer_requeue : resending active buffers \n"); if (!list_empty(&q->queue)) next = list_entry(q->queue.next, struct saa7134_buf, vb.queue); buf->activate(dev, buf, next); return 0; } static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state) { struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev); struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev); /* disable overlay - apps should enable it explicitly on resume*/ dev->ovenable = 0; /* Disable interrupts, DMA, and rest of the chip*/ saa_writel(SAA7134_IRQ1, 0); saa_writel(SAA7134_IRQ2, 0); saa_writel(SAA7134_MAIN_CTRL, 0); dev->insuspend = 1; synchronize_irq(pci_dev->irq); /* ACK interrupts once more, just in case, since the IRQ handler won't ack them anymore*/ saa_writel(SAA7134_IRQ_REPORT, saa_readl(SAA7134_IRQ_REPORT)); /* Disable timeout timers - if we have active buffers, we will fill them on resume*/ del_timer(&dev->video_q.timeout); del_timer(&dev->vbi_q.timeout); del_timer(&dev->ts_q.timeout); if (dev->remote) saa7134_ir_stop(dev); pci_save_state(pci_dev); pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); return 0; } static int saa7134_resume(struct pci_dev *pci_dev) { struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev); struct saa7134_dev *dev = container_of(v4l2_dev, struct saa7134_dev, v4l2_dev); unsigned long flags; pci_set_power_state(pci_dev, PCI_D0); pci_restore_state(pci_dev); /* Do things that are done in saa7134_initdev , except of initializing memory structures.*/ saa7134_board_init1(dev); /* saa7134_hwinit1 */ if (saa7134_boards[dev->board].video_out) saa7134_videoport_init(dev); if (card_has_mpeg(dev)) saa7134_ts_init_hw(dev); if (dev->remote) saa7134_ir_start(dev); saa7134_hw_enable1(dev); msleep(100); saa7134_board_init2(dev); /*saa7134_hwinit2*/ saa7134_set_tvnorm_hw(dev); saa7134_tvaudio_setmute(dev); saa7134_tvaudio_setvolume(dev, dev->ctl_volume); saa7134_tvaudio_init(dev); saa7134_tvaudio_do_scan(dev); saa7134_enable_i2s(dev); saa7134_hw_enable2(dev); saa7134_irq_video_signalchange(dev); /*resume unfinished buffer(s)*/ spin_lock_irqsave(&dev->slock, flags); saa7134_buffer_requeue(dev, &dev->video_q); saa7134_buffer_requeue(dev, &dev->vbi_q); saa7134_buffer_requeue(dev, &dev->ts_q); /* FIXME: Disable DMA audio sound - temporary till proper support is implemented*/ dev->dmasound.dma_running = 0; /* start DMA now*/ dev->insuspend = 0; smp_wmb(); saa7134_set_dmabits(dev); spin_unlock_irqrestore(&dev->slock, flags); return 0; } #endif /* ----------------------------------------------------------- */ int saa7134_ts_register(struct saa7134_mpeg_ops *ops) { struct saa7134_dev *dev; mutex_lock(&saa7134_devlist_lock); list_for_each_entry(dev, &saa7134_devlist, devlist) mpeg_ops_attach(ops, dev); list_add_tail(&ops->next,&mops_list); mutex_unlock(&saa7134_devlist_lock); return 0; } void saa7134_ts_unregister(struct 
saa7134_mpeg_ops *ops) { struct saa7134_dev *dev; mutex_lock(&saa7134_devlist_lock); list_del(&ops->next); list_for_each_entry(dev, &saa7134_devlist, devlist) mpeg_ops_detach(ops, dev); mutex_unlock(&saa7134_devlist_lock); } EXPORT_SYMBOL(saa7134_ts_register); EXPORT_SYMBOL(saa7134_ts_unregister); /* ----------------------------------------------------------- */ static struct pci_driver saa7134_pci_driver = { .name = "saa7134", .id_table = saa7134_pci_tbl, .probe = saa7134_initdev, .remove = __devexit_p(saa7134_finidev), #ifdef CONFIG_PM .suspend = saa7134_suspend, .resume = saa7134_resume #endif }; static int __init saa7134_init(void) { INIT_LIST_HEAD(&saa7134_devlist); printk(KERN_INFO "saa7130/34: v4l2 driver version %d.%d.%d loaded\n", (SAA7134_VERSION_CODE >> 16) & 0xff, (SAA7134_VERSION_CODE >> 8) & 0xff, SAA7134_VERSION_CODE & 0xff); #ifdef SNAPSHOT printk(KERN_INFO "saa7130/34: snapshot date %04d-%02d-%02d\n", SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100); #endif return pci_register_driver(&saa7134_pci_driver); } static void __exit saa7134_fini(void) { pci_unregister_driver(&saa7134_pci_driver); } module_init(saa7134_init); module_exit(saa7134_fini); /* ----------------------------------------------------------- */ EXPORT_SYMBOL(saa7134_set_gpio); EXPORT_SYMBOL(saa7134_boards); /* ----------------- for the DMA sound modules --------------- */ EXPORT_SYMBOL(saa7134_dmasound_init); EXPORT_SYMBOL(saa7134_dmasound_exit); EXPORT_SYMBOL(saa7134_pgtable_free); EXPORT_SYMBOL(saa7134_pgtable_build); EXPORT_SYMBOL(saa7134_pgtable_alloc); EXPORT_SYMBOL(saa7134_set_dmabits); /* ----------------------------------------------------------- */ /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
tvall43/odroid-w_kernel
drivers/video/fbdev/omap/lcd_palmz71.c
1272
2610
/*
 * LCD panel support for the Palm Zire71
 *
 * Original version : Romain Goyet
 * Current version : Laurent Gonzalez
 * Modified for zire71 : Marek Vasut
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>

#include "omapfb.h"

static int palmz71_panel_init(struct lcd_panel *panel,
			      struct omapfb_device *fbdev)
{
	return 0;
}

static void palmz71_panel_cleanup(struct lcd_panel *panel)
{
}

static int palmz71_panel_enable(struct lcd_panel *panel)
{
	return 0;
}

static void palmz71_panel_disable(struct lcd_panel *panel)
{
}

static unsigned long palmz71_panel_get_caps(struct lcd_panel *panel)
{
	return OMAPFB_CAPS_SET_BACKLIGHT;
}

struct lcd_panel palmz71_panel = {
	.name		= "palmz71",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
			  OMAP_LCDC_HSVS_OPPOSITE,
	.data_lines	= 16,
	.bpp		= 16,
	.pixel_clock	= 24000,
	.x_res		= 320,
	.y_res		= 320,
	.hsw		= 4,
	.hfp		= 8,
	.hbp		= 28,
	.vsw		= 1,
	.vfp		= 8,
	.vbp		= 7,
	.pcd		= 0,

	.init		= palmz71_panel_init,
	.cleanup	= palmz71_panel_cleanup,
	.enable		= palmz71_panel_enable,
	.disable	= palmz71_panel_disable,
	.get_caps	= palmz71_panel_get_caps,
};

static int palmz71_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&palmz71_panel);
	return 0;
}

static int palmz71_panel_remove(struct platform_device *pdev)
{
	return 0;
}

static int palmz71_panel_suspend(struct platform_device *pdev,
				 pm_message_t mesg)
{
	return 0;
}

static int palmz71_panel_resume(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver palmz71_panel_driver = {
	.probe		= palmz71_panel_probe,
	.remove		= palmz71_panel_remove,
	.suspend	= palmz71_panel_suspend,
	.resume		= palmz71_panel_resume,
	.driver		= {
		.name	= "lcd_palmz71",
	},
};

module_platform_driver(palmz71_panel_driver);
gpl-2.0
zarboz/android_kernel_htc_dlx
virt/drivers/infiniband/hw/cxgb4/cm.c
2040
72106
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/list.h> #include <linux/workqueue.h> #include <linux/skbuff.h> #include <linux/timer.h> #include <linux/notifier.h> #include <linux/inetdevice.h> #include <linux/ip.h> #include <linux/tcp.h> #include <net/neighbour.h> #include <net/netevent.h> #include <net/route.h> #include "iw_cxgb4.h" static char *states[] = { "idle", "listen", "connecting", "mpa_wait_req", "mpa_req_sent", "mpa_req_rcvd", "mpa_rep_sent", "fpdu_mode", "aborting", "closing", "moribund", "dead", NULL, }; static int dack_mode = 1; module_param(dack_mode, int, 0644); MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); int c4iw_max_read_depth = 8; module_param(c4iw_max_read_depth, int, 0644); MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); static int enable_tcp_timestamps; module_param(enable_tcp_timestamps, int, 0644); MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); static int enable_tcp_sack; module_param(enable_tcp_sack, int, 0644); MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); static int enable_tcp_window_scaling = 1; module_param(enable_tcp_window_scaling, int, 0644); MODULE_PARM_DESC(enable_tcp_window_scaling, "Enable tcp window scaling (default=1)"); int c4iw_debug; module_param(c4iw_debug, int, 0644); MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); static int peer2peer; module_param(peer2peer, int, 0644); MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; module_param(p2p_type, int, 0644); MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " "1=RDMA_READ 0=RDMA_WRITE (default 1)"); static int ep_timeout_secs = 60; module_param(ep_timeout_secs, int, 0644); MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " "in seconds (default=60)"); static int mpa_rev = 1; module_param(mpa_rev, int, 0644); MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft" " compliant (default=1)"); static int markers_enabled; module_param(markers_enabled, int, 0644); MODULE_PARM_DESC(markers_enabled, "Enable MPA 
MARKERS (default(0)=disabled)"); static int crc_enabled = 1; module_param(crc_enabled, int, 0644); MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); static int rcv_win = 256 * 1024; module_param(rcv_win, int, 0644); MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); static int snd_win = 128 * 1024; module_param(snd_win, int, 0644); MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); static struct workqueue_struct *workq; static struct sk_buff_head rxq; static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); static void ep_timeout(unsigned long arg); static void connect_reply_upcall(struct c4iw_ep *ep, int status); static LIST_HEAD(timeout_list); static spinlock_t timeout_lock; static void start_ep_timer(struct c4iw_ep *ep) { PDBG("%s ep %p\n", __func__, ep); if (timer_pending(&ep->timer)) { PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); del_timer_sync(&ep->timer); } else c4iw_get_ep(&ep->com); ep->timer.expires = jiffies + ep_timeout_secs * HZ; ep->timer.data = (unsigned long)ep; ep->timer.function = ep_timeout; add_timer(&ep->timer); } static void stop_ep_timer(struct c4iw_ep *ep) { PDBG("%s ep %p\n", __func__, ep); if (!timer_pending(&ep->timer)) { printk(KERN_ERR "%s timer stopped when its not running! " "ep %p state %u\n", __func__, ep, ep->com.state); WARN_ON(1); return; } del_timer_sync(&ep->timer); c4iw_put_ep(&ep->com); } static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, struct l2t_entry *l2e) { int error = 0; if (c4iw_fatal_error(rdev)) { kfree_skb(skb); PDBG("%s - device in error state - dropping\n", __func__); return -EIO; } error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); if (error < 0) kfree_skb(skb); return error < 0 ? error : 0; } int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) { int error = 0; if (c4iw_fatal_error(rdev)) { kfree_skb(skb); PDBG("%s - device in error state - dropping\n", __func__); return -EIO; } error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); if (error < 0) kfree_skb(skb); return error < 0 ? 
error : 0; } static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) { struct cpl_tid_release *req; skb = get_skb(skb, sizeof *req, GFP_KERNEL); if (!skb) return; req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); INIT_TP_WR(req, hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); c4iw_ofld_send(rdev, skb); return; } static void set_emss(struct c4iw_ep *ep, u16 opt) { ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; ep->mss = ep->emss; if (GET_TCPOPT_TSTAMP(opt)) ep->emss -= 12; if (ep->emss < 128) ep->emss = 128; PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), ep->mss, ep->emss); } static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) { enum c4iw_ep_state state; mutex_lock(&epc->mutex); state = epc->state; mutex_unlock(&epc->mutex); return state; } static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) { epc->state = new; } static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) { mutex_lock(&epc->mutex); PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); __state_set(epc, new); mutex_unlock(&epc->mutex); return; } static void *alloc_ep(int size, gfp_t gfp) { struct c4iw_ep_common *epc; epc = kzalloc(size, gfp); if (epc) { kref_init(&epc->kref); mutex_init(&epc->mutex); c4iw_init_wr_wait(&epc->wr_wait); } PDBG("%s alloc ep %p\n", __func__, epc); return epc; } void _c4iw_free_ep(struct kref *kref) { struct c4iw_ep *ep; ep = container_of(kref, struct c4iw_ep, com.kref); PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); } kfree(ep); } static void release_ep_resources(struct c4iw_ep *ep) { set_bit(RELEASE_RESOURCES, &ep->com.flags); c4iw_put_ep(&ep->com); } static int status2errno(int status) { switch (status) { case CPL_ERR_NONE: return 0; case CPL_ERR_CONN_RESET: return -ECONNRESET; case CPL_ERR_ARP_MISS: return -EHOSTUNREACH; case CPL_ERR_CONN_TIMEDOUT: return -ETIMEDOUT; case CPL_ERR_TCAM_FULL: return -ENOMEM; case CPL_ERR_CONN_EXIST: return -EADDRINUSE; default: return -EIO; } } /* * Try and reuse skbs already allocated... */ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) { if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { skb_trim(skb, 0); skb_get(skb); skb_reset_transport_header(skb); } else { skb = alloc_skb(len, gfp); } return skb; } static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, __be32 peer_ip, __be16 local_port, __be16 peer_port, u8 tos) { struct rtable *rt; struct flowi4 fl4; rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, peer_port, local_port, IPPROTO_TCP, tos, 0); if (IS_ERR(rt)) return NULL; return rt; } static void arp_failure_discard(void *handle, struct sk_buff *skb) { PDBG("%s c4iw_dev %p\n", __func__, handle); kfree_skb(skb); } /* * Handle an ARP failure for an active open. */ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) { printk(KERN_ERR MOD "ARP failure duing connect\n"); kfree_skb(skb); } /* * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant * and send it along. 
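 * Since the L2 entry could not be resolved, a RST could not reach the peer
 * anyway; the NO_RST variant simply lets the hardware tear down its
 * connection state without attempting to send one.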
*/ static void abort_arp_failure(void *handle, struct sk_buff *skb) { struct c4iw_rdev *rdev = handle; struct cpl_abort_req *req = cplhdr(skb); PDBG("%s rdev %p\n", __func__, rdev); req->cmd = CPL_ABORT_NO_RST; c4iw_ofld_send(rdev, skb); } static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) { unsigned int flowclen = 80; struct fw_flowc_wr *flowc; int i; skb = get_skb(skb, flowclen, GFP_KERNEL); flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8)); flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, 16)) | FW_WR_FLOWID(ep->hwtid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; flowc->mnemval[6].val = cpu_to_be32(snd_win); flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; flowc->mnemval[7].val = cpu_to_be32(ep->emss); /* Pad WR to 16 byte boundary */ flowc->mnemval[8].mnemonic = 0; flowc->mnemval[8].val = 0; for (i = 0; i < 9; i++) { flowc->mnemval[i].r4[0] = 0; flowc->mnemval[i].r4[1] = 0; flowc->mnemval[i].r4[2] = 0; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); c4iw_ofld_send(&ep->com.dev->rdev, skb); } static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) { struct cpl_close_con_req *req; struct sk_buff *skb; int wrlen = roundup(sizeof *req, 16); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); skb = get_skb(NULL, wrlen, gfp); if (!skb) { printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); req = (struct cpl_close_con_req *) skb_put(skb, wrlen); memset(req, 0, wrlen); INIT_TP_WR(req, ep->hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) { struct cpl_abort_req *req; int wrlen = roundup(sizeof *req, 16); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); skb = get_skb(skb, wrlen, gfp); if (!skb) { printk(KERN_ERR MOD "%s - failed to alloc skb.\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); req = (struct cpl_abort_req *) skb_put(skb, wrlen); memset(req, 0, wrlen); INIT_TP_WR(req, ep->hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); req->cmd = CPL_ABORT_SEND_RST; return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int send_connect(struct c4iw_ep *ep) { struct cpl_act_open_req *req; struct sk_buff *skb; u64 opt0; u32 opt2; unsigned int mtu_idx; int wscale; int wrlen = roundup(sizeof *req, 16); PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { printk(KERN_ERR MOD "%s - failed to alloc skb.\n", __func__); return -ENOMEM; } set_wr_txq(skb, 
CPL_PRIORITY_SETUP, ep->ctrlq_idx); cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); wscale = compute_wscale(rcv_win); opt0 = KEEP_ALIVE(1) | DELACK(1) | WND_SCALE(wscale) | MSS_IDX(mtu_idx) | L2T_IDX(ep->l2t->idx) | TX_CHAN(ep->tx_chan) | SMAC_SEL(ep->smac_idx) | DSCP(ep->tos) | ULP_MODE(ULP_MODE_TCPDDP) | RCV_BUFSIZ(rcv_win>>10); opt2 = RX_CHANNEL(0) | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); if (enable_tcp_timestamps) opt2 |= TSTAMPS_EN(1); if (enable_tcp_sack) opt2 |= SACK_EN(1); if (wscale && enable_tcp_window_scaling) opt2 |= WND_SCALE_EN(1); t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); req = (struct cpl_act_open_req *) skb_put(skb, wrlen); INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32( MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); req->local_port = ep->com.local_addr.sin_port; req->peer_port = ep->com.remote_addr.sin_port; req->local_ip = ep->com.local_addr.sin_addr.s_addr; req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; req->opt0 = cpu_to_be64(opt0); req->params = 0; req->opt2 = cpu_to_be32(opt2); return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, u8 mpa_rev_to_use) { int mpalen, wrlen; struct fw_ofld_tx_data_wr *req; struct mpa_message *mpa; struct mpa_v2_conn_params mpa_v2_params; PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); BUG_ON(skb_cloned(skb)); mpalen = sizeof(*mpa) + ep->plen; if (mpa_rev_to_use == 2) mpalen += sizeof(struct mpa_v2_conn_params); wrlen = roundup(mpalen + sizeof *req, 16); skb = get_skb(skb, wrlen, GFP_KERNEL); if (!skb) { connect_reply_upcall(ep, -ENOMEM); return; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); memset(req, 0, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL(1) | FW_WR_IMMDLEN(mpalen)); req->flowid_len16 = cpu_to_be32( FW_WR_FLOWID(ep->hwtid) | FW_WR_LEN16(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( FW_OFLD_TX_DATA_WR_FLUSH(1) | FW_OFLD_TX_DATA_WR_SHOVE(1)); mpa = (struct mpa_message *)(req + 1); memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); mpa->flags = (crc_enabled ? MPA_CRC : 0) | (markers_enabled ? MPA_MARKERS : 0) | (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); mpa->private_data_size = htons(ep->plen); mpa->revision = mpa_rev_to_use; if (mpa_rev_to_use == 1) { ep->tried_with_mpa_v1 = 1; ep->retry_with_mpa_v1 = 0; } if (mpa_rev_to_use == 2) { mpa->private_data_size += htons(sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); if (peer2peer) { mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) mpa_v2_params.ord |= htons(MPA_V2_RDMA_WRITE_RTR); else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) mpa_v2_params.ord |= htons(MPA_V2_RDMA_READ_RTR); } memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), ep->mpa_pkt + sizeof(*mpa), ep->plen); } else if (ep->plen) memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); /* * Reference the mpa skb. This ensures the data area * will remain in memory until the hw acks the tx. * Function fw4_ack() will deref it. 
*/ skb_get(skb); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); BUG_ON(ep->mpa_skb); ep->mpa_skb = skb; c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); start_ep_timer(ep); state_set(&ep->com, MPA_REQ_SENT); ep->mpa_attr.initiator = 1; return; } static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) { int mpalen, wrlen; struct fw_ofld_tx_data_wr *req; struct mpa_message *mpa; struct sk_buff *skb; struct mpa_v2_conn_params mpa_v2_params; PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) mpalen += sizeof(struct mpa_v2_conn_params); wrlen = roundup(mpalen + sizeof *req, 16); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); memset(req, 0, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL(1) | FW_WR_IMMDLEN(mpalen)); req->flowid_len16 = cpu_to_be32( FW_WR_FLOWID(ep->hwtid) | FW_WR_LEN16(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( FW_OFLD_TX_DATA_WR_FLUSH(1) | FW_OFLD_TX_DATA_WR_SHOVE(1)); mpa = (struct mpa_message *)(req + 1); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = MPA_REJECT; mpa->revision = mpa_rev; mpa->private_data_size = htons(plen); if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; mpa->private_data_size += htons(sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons(((u16)ep->ird) | (peer2peer ? MPA_V2_PEER2PEER_MODEL : 0)); mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE ? MPA_V2_RDMA_WRITE_RTR : p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ ? MPA_V2_RDMA_READ_RTR : 0) : 0)); memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), pdata, plen); } else if (plen) memcpy(mpa->private_data, pdata, plen); /* * Reference the mpa skb again. This ensures the data area * will remain in memory until the hw acks the tx. * Function fw4_ack() will deref it. 
*/ skb_get(skb); set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); BUG_ON(ep->mpa_skb); ep->mpa_skb = skb; return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) { int mpalen, wrlen; struct fw_ofld_tx_data_wr *req; struct mpa_message *mpa; struct sk_buff *skb; struct mpa_v2_conn_params mpa_v2_params; PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) mpalen += sizeof(struct mpa_v2_conn_params); wrlen = roundup(mpalen + sizeof *req, 16); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); return -ENOMEM; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); memset(req, 0, wrlen); req->op_to_immdlen = cpu_to_be32( FW_WR_OP(FW_OFLD_TX_DATA_WR) | FW_WR_COMPL(1) | FW_WR_IMMDLEN(mpalen)); req->flowid_len16 = cpu_to_be32( FW_WR_FLOWID(ep->hwtid) | FW_WR_LEN16(wrlen >> 4)); req->plen = cpu_to_be32(mpalen); req->tunnel_to_proxy = cpu_to_be32( FW_OFLD_TX_DATA_WR_FLUSH(1) | FW_OFLD_TX_DATA_WR_SHOVE(1)); mpa = (struct mpa_message *)(req + 1); memset(mpa, 0, sizeof(*mpa)); memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | (markers_enabled ? MPA_MARKERS : 0); mpa->revision = ep->mpa_attr.version; mpa->private_data_size = htons(plen); if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; mpa->private_data_size += htons(sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); if (peer2peer && (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED)) { mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) mpa_v2_params.ord |= htons(MPA_V2_RDMA_WRITE_RTR); else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) mpa_v2_params.ord |= htons(MPA_V2_RDMA_READ_RTR); } memcpy(mpa->private_data, &mpa_v2_params, sizeof(struct mpa_v2_conn_params)); if (ep->plen) memcpy(mpa->private_data + sizeof(struct mpa_v2_conn_params), pdata, plen); } else if (plen) memcpy(mpa->private_data, pdata, plen); /* * Reference the mpa skb. This ensures the data area * will remain in memory until the hw acks the tx. * Function fw4_ack() will deref it. 
*/ skb_get(skb); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); ep->mpa_skb = skb; state_set(&ep->com, MPA_REP_SENT); return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_act_establish *req = cplhdr(skb); unsigned int tid = GET_TID(req); unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); struct tid_info *t = dev->rdev.lldi.tids; ep = lookup_atid(t, atid); PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); dst_confirm(ep->dst); /* setup the hwtid for this connection */ ep->hwtid = tid; cxgb4_insert_tid(t, ep, tid); ep->snd_seq = be32_to_cpu(req->snd_isn); ep->rcv_seq = be32_to_cpu(req->rcv_isn); set_emss(ep, ntohs(req->tcp_opt)); /* dealloc the atid */ cxgb4_free_atid(t, atid); /* start MPA negotiation */ send_flowc(ep, NULL); if (ep->retry_with_mpa_v1) send_mpa_req(ep, skb, 1); else send_mpa_req(ep, skb, mpa_rev); return 0; } static void close_complete_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CLOSE; if (ep->com.cm_id) { PDBG("close complete delivered ep %p cm_id %p tid %u\n", ep, ep->com.cm_id, ep->hwtid); ep->com.cm_id->event_handler(ep->com.cm_id, &event); ep->com.cm_id->rem_ref(ep->com.cm_id); ep->com.cm_id = NULL; ep->com.qp = NULL; } } static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) { PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); close_complete_upcall(ep); state_set(&ep->com, ABORTING); return send_abort(ep, skb, gfp); } static void peer_close_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_DISCONNECT; if (ep->com.cm_id) { PDBG("peer close delivered ep %p cm_id %p tid %u\n", ep, ep->com.cm_id, ep->hwtid); ep->com.cm_id->event_handler(ep->com.cm_id, &event); } } static void peer_abort_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CLOSE; event.status = -ECONNRESET; if (ep->com.cm_id) { PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, ep->com.cm_id, ep->hwtid); ep->com.cm_id->event_handler(ep->com.cm_id, &event); ep->com.cm_id->rem_ref(ep->com.cm_id); ep->com.cm_id = NULL; ep->com.qp = NULL; } } static void connect_reply_upcall(struct c4iw_ep *ep, int status) { struct iw_cm_event event; PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CONNECT_REPLY; event.status = status; event.local_addr = ep->com.local_addr; event.remote_addr = ep->com.remote_addr; if ((status == 0) || (status == -ECONNREFUSED)) { if (!ep->tried_with_mpa_v1) { /* this means MPA_v2 is used */ event.private_data_len = ep->plen - sizeof(struct mpa_v2_conn_params); event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + sizeof(struct mpa_v2_conn_params); } else { /* this means MPA_v1 is used */ event.private_data_len = ep->plen; event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); } } PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); ep->com.cm_id->event_handler(ep->com.cm_id, &event); if (status < 0) { ep->com.cm_id->rem_ref(ep->com.cm_id); ep->com.cm_id = NULL; ep->com.qp = NULL; } } static void 
connect_request_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_CONNECT_REQUEST; event.local_addr = ep->com.local_addr; event.remote_addr = ep->com.remote_addr; event.provider_data = ep; if (!ep->tried_with_mpa_v1) { /* this means MPA_v2 is used */ event.ord = ep->ord; event.ird = ep->ird; event.private_data_len = ep->plen - sizeof(struct mpa_v2_conn_params); event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + sizeof(struct mpa_v2_conn_params); } else { /* this means MPA_v1 is used. Send max supported */ event.ord = c4iw_max_read_depth; event.ird = c4iw_max_read_depth; event.private_data_len = ep->plen; event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); } if (state_read(&ep->parent_ep->com) != DEAD) { c4iw_get_ep(&ep->com); ep->parent_ep->com.cm_id->event_handler( ep->parent_ep->com.cm_id, &event); } c4iw_put_ep(&ep->parent_ep->com); ep->parent_ep = NULL; } static void established_upcall(struct c4iw_ep *ep) { struct iw_cm_event event; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); memset(&event, 0, sizeof(event)); event.event = IW_CM_EVENT_ESTABLISHED; event.ird = ep->ird; event.ord = ep->ord; if (ep->com.cm_id) { PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); ep->com.cm_id->event_handler(ep->com.cm_id, &event); } } static int update_rx_credits(struct c4iw_ep *ep, u32 credits) { struct cpl_rx_data_ack *req; struct sk_buff *skb; int wrlen = roundup(sizeof *req, 16); PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); return 0; } req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); memset(req, 0, wrlen); INIT_TP_WR(req, ep->hwtid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | F_RX_DACK_CHANGE | V_RX_DACK_MODE(dack_mode)); set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); c4iw_ofld_send(&ep->com.dev->rdev, skb); return credits; } static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) { struct mpa_message *mpa; struct mpa_v2_conn_params *mpa_v2_params; u16 plen; u16 resp_ird, resp_ord; u8 rtr_mismatch = 0, insuff_ird = 0; struct c4iw_qp_attributes attrs; enum c4iw_qp_attr_mask mask; int err; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); /* * Stop mpa timer. If it expired, then the state has * changed and we bail since ep_timeout already aborted * the connection. */ stop_ep_timer(ep); if (state_read(&ep->com) != MPA_REQ_SENT) return; /* * If we get more than the supported amount of private data * then we must fail this connection. */ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { err = -EINVAL; goto err; } /* * copy the new data into our accumulation buffer. */ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), skb->len); ep->mpa_pkt_len += skb->len; /* * if we don't even have the mpa message, then bail. */ if (ep->mpa_pkt_len < sizeof(*mpa)) return; mpa = (struct mpa_message *) ep->mpa_pkt; /* Validate MPA header. */ if (mpa->revision > mpa_rev) { printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," " Received = %d\n", __func__, mpa_rev, mpa->revision); err = -EPROTO; goto err; } if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { err = -EPROTO; goto err; } plen = ntohs(mpa->private_data_size); /* * Fail if there's too much private data. 
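 * 'plen' comes straight from the peer's MPA header, so it is range-checked
 * against MPA_MAX_PRIVATE_DATA before the connection setup continues.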
*/ if (plen > MPA_MAX_PRIVATE_DATA) { err = -EPROTO; goto err; } /* * If plen does not account for pkt size */ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { err = -EPROTO; goto err; } ep->plen = (u8) plen; /* * If we don't have all the pdata yet, then bail. * We'll continue process when more data arrives. */ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) return; if (mpa->flags & MPA_REJECT) { err = -ECONNREFUSED; goto err; } /* * If we get here we have accumulated the entire mpa * start reply message including private data. And * the MPA header is valid. */ state_set(&ep->com, FPDU_MODE); ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; ep->mpa_attr.recv_marker_enabled = markers_enabled; ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; ep->mpa_attr.version = mpa->revision; ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; if (mpa->revision == 2) { ep->mpa_attr.enhanced_rdma_conn = mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; if (ep->mpa_attr.enhanced_rdma_conn) { mpa_v2_params = (struct mpa_v2_conn_params *) (ep->mpa_pkt + sizeof(*mpa)); resp_ird = ntohs(mpa_v2_params->ird) & MPA_V2_IRD_ORD_MASK; resp_ord = ntohs(mpa_v2_params->ord) & MPA_V2_IRD_ORD_MASK; /* * This is a double-check. Ideally, below checks are * not required since ird/ord stuff has been taken * care of in c4iw_accept_cr */ if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { err = -ENOMEM; ep->ird = resp_ord; ep->ord = resp_ird; insuff_ird = 1; } if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) { if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_WRITE_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_RDMA_WRITE; else if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_READ_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; } } } else if (mpa->revision == 1) if (peer2peer) ep->mpa_attr.p2p_type = p2p_type; PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " "%d\n", __func__, ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, ep->mpa_attr.p2p_type, p2p_type); /* * If responder's RTR does not match with that of initiator, assign * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not * generated when moving QP to RTS state. * A TERM message will be sent after QP has moved to RTS state */ if ((ep->mpa_attr.version == 2) && peer2peer && (ep->mpa_attr.p2p_type != p2p_type)) { ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; rtr_mismatch = 1; } attrs.mpa_attr = ep->mpa_attr; attrs.max_ird = ep->ird; attrs.max_ord = ep->ord; attrs.llp_stream_handle = ep; attrs.next_state = C4IW_QP_STATE_RTS; mask = C4IW_QP_ATTR_NEXT_STATE | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; /* bind QP and TID with INIT_WR */ err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); if (err) goto err; /* * If responder's RTR requirement did not match with what initiator * supports, generate TERM message */ if (rtr_mismatch) { printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_NOMATCH_RTR; attrs.next_state = C4IW_QP_STATE_TERMINATE; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); err = -ENOMEM; goto out; } /* * Generate TERM if initiator IRD is not sufficient for responder * provided ORD. 
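 * (The initiator must be able to absorb at least as many inbound RDMA reads
 * as the responder may issue, i.e. initiator IRD >= responder ORD.)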
Currently, we do the same behaviour even when * responder provided IRD is also not sufficient as regards to * initiator ORD. */ if (insuff_ird) { printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", __func__); attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_INSUFF_IRD; attrs.next_state = C4IW_QP_STATE_TERMINATE; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); err = -ENOMEM; goto out; } goto out; err: state_set(&ep->com, ABORTING); send_abort(ep, skb, GFP_KERNEL); out: connect_reply_upcall(ep, err); return; } static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) { struct mpa_message *mpa; struct mpa_v2_conn_params *mpa_v2_params; u16 plen; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); if (state_read(&ep->com) != MPA_REQ_WAIT) return; /* * If we get more than the supported amount of private data * then we must fail this connection. */ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { stop_ep_timer(ep); abort_connection(ep, skb, GFP_KERNEL); return; } PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); /* * Copy the new data into our accumulation buffer. */ skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), skb->len); ep->mpa_pkt_len += skb->len; /* * If we don't even have the mpa message, then bail. * We'll continue process when more data arrives. */ if (ep->mpa_pkt_len < sizeof(*mpa)) return; PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); stop_ep_timer(ep); mpa = (struct mpa_message *) ep->mpa_pkt; /* * Validate MPA Header. */ if (mpa->revision > mpa_rev) { printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," " Received = %d\n", __func__, mpa_rev, mpa->revision); abort_connection(ep, skb, GFP_KERNEL); return; } if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { abort_connection(ep, skb, GFP_KERNEL); return; } plen = ntohs(mpa->private_data_size); /* * Fail if there's too much private data. */ if (plen > MPA_MAX_PRIVATE_DATA) { abort_connection(ep, skb, GFP_KERNEL); return; } /* * If plen does not account for pkt size */ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { abort_connection(ep, skb, GFP_KERNEL); return; } ep->plen = (u8) plen; /* * If we don't have all the pdata yet, then bail. */ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) return; /* * If we get here we have accumulated the entire mpa * start reply message including private data. */ ep->mpa_attr.initiator = 0; ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; ep->mpa_attr.recv_marker_enabled = markers_enabled; ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; ep->mpa_attr.version = mpa->revision; if (mpa->revision == 1) ep->tried_with_mpa_v1 = 1; ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; if (mpa->revision == 2) { ep->mpa_attr.enhanced_rdma_conn = mpa->flags & MPA_ENHANCED_RDMA_CONN ? 
1 : 0; if (ep->mpa_attr.enhanced_rdma_conn) { mpa_v2_params = (struct mpa_v2_conn_params *) (ep->mpa_pkt + sizeof(*mpa)); ep->ird = ntohs(mpa_v2_params->ird) & MPA_V2_IRD_ORD_MASK; ep->ord = ntohs(mpa_v2_params->ord) & MPA_V2_IRD_ORD_MASK; if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) if (peer2peer) { if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_WRITE_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_RDMA_WRITE; else if (ntohs(mpa_v2_params->ord) & MPA_V2_RDMA_READ_RTR) ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; } } } else if (mpa->revision == 1) if (peer2peer) ep->mpa_attr.p2p_type = p2p_type; PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, ep->mpa_attr.p2p_type); state_set(&ep->com, MPA_REQ_RCVD); /* drive upcall */ connect_request_upcall(ep); return; } static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_rx_data *hdr = cplhdr(skb); unsigned int dlen = ntohs(hdr->len); unsigned int tid = GET_TID(hdr); struct tid_info *t = dev->rdev.lldi.tids; ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); skb_pull(skb, sizeof(*hdr)); skb_trim(skb, dlen); ep->rcv_seq += dlen; BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen)); /* update RX credits */ update_rx_credits(ep, dlen); switch (state_read(&ep->com)) { case MPA_REQ_SENT: process_mpa_reply(ep, skb); break; case MPA_REQ_WAIT: process_mpa_request(ep, skb); break; case MPA_REP_SENT: break; default: printk(KERN_ERR MOD "%s Unexpected streaming data." " ep %p state %d tid %u\n", __func__, ep, state_read(&ep->com), ep->hwtid); /* * The ep will timeout and inform the ULP of the failure. * See ep_timeout(). 
*/ break; } return 0; } static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_abort_rpl_rss *rpl = cplhdr(skb); int release = 0; unsigned int tid = GET_TID(rpl); struct tid_info *t = dev->rdev.lldi.tids; ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); BUG_ON(!ep); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case ABORTING: __state_set(&ep->com, DEAD); release = 1; break; default: printk(KERN_ERR "%s ep %p state %d\n", __func__, ep, ep->com.state); break; } mutex_unlock(&ep->com.mutex); if (release) release_ep_resources(ep); return 0; } /* * Return whether a failed active open has allocated a TID */ static inline int act_open_has_tid(int status) { return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && status != CPL_ERR_ARP_MISS; } static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_act_open_rpl *rpl = cplhdr(skb); unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( ntohl(rpl->atid_status))); struct tid_info *t = dev->rdev.lldi.tids; int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); ep = lookup_atid(t, atid); PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, status, status2errno(status)); if (status == CPL_ERR_RTX_NEG_ADVICE) { printk(KERN_WARNING MOD "Connection problems for atid %u\n", atid); return 0; } connect_reply_upcall(ep, status2errno(status)); state_set(&ep->com, DEAD); if (status && act_open_has_tid(status)) cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); cxgb4_free_atid(t, atid); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); c4iw_put_ep(&ep->com); return 0; } static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_pass_open_rpl *rpl = cplhdr(skb); struct tid_info *t = dev->rdev.lldi.tids; unsigned int stid = GET_TID(rpl); struct c4iw_listen_ep *ep = lookup_stid(t, stid); if (!ep) { printk(KERN_ERR MOD "stid %d lookup failure!\n", stid); return 0; } PDBG("%s ep %p status %d error %d\n", __func__, ep, rpl->status, status2errno(rpl->status)); c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); return 0; } static int listen_stop(struct c4iw_listen_ep *ep) { struct sk_buff *skb; struct cpl_close_listsvr_req *req; PDBG("%s ep %p\n", __func__, ep); skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); if (!skb) { printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); return -ENOMEM; } req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); INIT_TP_WR(req, 0); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); req->reply_ctrl = cpu_to_be16( QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); return c4iw_ofld_send(&ep->com.dev->rdev, skb); } static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); struct tid_info *t = dev->rdev.lldi.tids; unsigned int stid = GET_TID(rpl); struct c4iw_listen_ep *ep = lookup_stid(t, stid); PDBG("%s ep %p\n", __func__, ep); c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); return 0; } static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, struct cpl_pass_accept_req *req) { struct cpl_pass_accept_rpl *rpl; unsigned int mtu_idx; u64 opt0; u32 opt2; int wscale; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); BUG_ON(skb_cloned(skb)); skb_trim(skb, sizeof(*rpl)); skb_get(skb); cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); wscale = compute_wscale(rcv_win); opt0 = 
KEEP_ALIVE(1) | DELACK(1) | WND_SCALE(wscale) | MSS_IDX(mtu_idx) | L2T_IDX(ep->l2t->idx) | TX_CHAN(ep->tx_chan) | SMAC_SEL(ep->smac_idx) | DSCP(ep->tos) | ULP_MODE(ULP_MODE_TCPDDP) | RCV_BUFSIZ(rcv_win>>10); opt2 = RX_CHANNEL(0) | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); if (enable_tcp_timestamps && req->tcpopt.tstamp) opt2 |= TSTAMPS_EN(1); if (enable_tcp_sack && req->tcpopt.sack) opt2 |= SACK_EN(1); if (wscale && enable_tcp_window_scaling) opt2 |= WND_SCALE_EN(1); rpl = cplhdr(skb); INIT_TP_WR(rpl, ep->hwtid); OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid)); rpl->opt0 = cpu_to_be64(opt0); rpl->opt2 = cpu_to_be32(opt2); set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); return; } static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, struct sk_buff *skb) { PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, peer_ip); BUG_ON(skb_cloned(skb)); skb_trim(skb, sizeof(struct cpl_tid_release)); skb_get(skb); release_tid(&dev->rdev, hwtid, skb); return; } static void get_4tuple(struct cpl_pass_accept_req *req, __be32 *local_ip, __be32 *peer_ip, __be16 *local_port, __be16 *peer_port) { int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); struct tcphdr *tcp = (struct tcphdr *) ((u8 *)(req + 1) + eth_len + ip_len); PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), ntohs(tcp->dest)); *peer_ip = ip->saddr; *local_ip = ip->daddr; *peer_port = tcp->source; *local_port = tcp->dest; return; } static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, struct c4iw_dev *cdev, bool clear_mpa_v1) { struct neighbour *n; int err, step; n = dst_neigh_lookup(dst, &peer_ip); if (!n) return -ENODEV; rcu_read_lock(); err = -ENOMEM; if (n->dev->flags & IFF_LOOPBACK) { struct net_device *pdev; pdev = ip_dev_find(&init_net, peer_ip); ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, n, pdev, 0); if (!ep->l2t) goto out; ep->mtu = pdev->mtu; ep->tx_chan = cxgb4_port_chan(pdev); ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; step = cdev->rdev.lldi.ntxq / cdev->rdev.lldi.nchan; ep->txq_idx = cxgb4_port_idx(pdev) * step; step = cdev->rdev.lldi.nrxq / cdev->rdev.lldi.nchan; ep->ctrlq_idx = cxgb4_port_idx(pdev); ep->rss_qid = cdev->rdev.lldi.rxq_ids[ cxgb4_port_idx(pdev) * step]; dev_put(pdev); } else { ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, n, n->dev, 0); if (!ep->l2t) goto out; ep->mtu = dst_mtu(dst); ep->tx_chan = cxgb4_port_chan(n->dev); ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1; step = cdev->rdev.lldi.ntxq / cdev->rdev.lldi.nchan; ep->txq_idx = cxgb4_port_idx(n->dev) * step; ep->ctrlq_idx = cxgb4_port_idx(n->dev); step = cdev->rdev.lldi.nrxq / cdev->rdev.lldi.nchan; ep->rss_qid = cdev->rdev.lldi.rxq_ids[ cxgb4_port_idx(n->dev) * step]; if (clear_mpa_v1) { ep->retry_with_mpa_v1 = 0; ep->tried_with_mpa_v1 = 0; } } err = 0; out: rcu_read_unlock(); neigh_release(n); return err; } static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *child_ep, *parent_ep; struct cpl_pass_accept_req *req = cplhdr(skb); unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); struct tid_info *t = dev->rdev.lldi.tids; unsigned int hwtid = GET_TID(req); struct dst_entry *dst; struct rtable *rt; __be32 local_ip, peer_ip; __be16 local_port, peer_port; int err; parent_ep = 
lookup_stid(t, stid); PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); if (state_read(&parent_ep->com) != LISTEN) { printk(KERN_ERR "%s - listening ep not in LISTEN\n", __func__); goto reject; } /* Find output route */ rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, GET_POPEN_TOS(ntohl(req->tos_stid))); if (!rt) { printk(KERN_ERR MOD "%s - failed to find dst entry!\n", __func__); goto reject; } dst = &rt->dst; child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); if (!child_ep) { printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", __func__); dst_release(dst); goto reject; } err = import_ep(child_ep, peer_ip, dst, dev, false); if (err) { printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); dst_release(dst); kfree(child_ep); goto reject; } state_set(&child_ep->com, CONNECTING); child_ep->com.dev = dev; child_ep->com.cm_id = NULL; child_ep->com.local_addr.sin_family = PF_INET; child_ep->com.local_addr.sin_port = local_port; child_ep->com.local_addr.sin_addr.s_addr = local_ip; child_ep->com.remote_addr.sin_family = PF_INET; child_ep->com.remote_addr.sin_port = peer_port; child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; c4iw_get_ep(&parent_ep->com); child_ep->parent_ep = parent_ep; child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); child_ep->dst = dst; child_ep->hwtid = hwtid; PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); init_timer(&child_ep->timer); cxgb4_insert_tid(t, child_ep, hwtid); accept_cr(child_ep, peer_ip, skb, req); goto out; reject: reject_cr(dev, hwtid, peer_ip, skb); out: return 0; } static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_pass_establish *req = cplhdr(skb); struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(req); ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); ep->snd_seq = be32_to_cpu(req->snd_isn); ep->rcv_seq = be32_to_cpu(req->rcv_isn); set_emss(ep, ntohs(req->tcp_opt)); dst_confirm(ep->dst); state_set(&ep->com, MPA_REQ_WAIT); start_ep_timer(ep); send_flowc(ep, skb); return 0; } static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_peer_close *hdr = cplhdr(skb); struct c4iw_ep *ep; struct c4iw_qp_attributes attrs; int disconnect = 1; int release = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(hdr); int ret; ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); dst_confirm(ep->dst); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case MPA_REQ_WAIT: __state_set(&ep->com, CLOSING); break; case MPA_REQ_SENT: __state_set(&ep->com, CLOSING); connect_reply_upcall(ep, -ECONNRESET); break; case MPA_REQ_RCVD: /* * We're gonna mark this puppy DEAD, but keep * the reference on it until the ULP accepts or * rejects the CR. Also wake up anyone waiting * in rdma connection migration (see c4iw_accept_cr()). 
*/ __state_set(&ep->com, CLOSING); PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); break; case MPA_REP_SENT: __state_set(&ep->com, CLOSING); PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); break; case FPDU_MODE: start_ep_timer(ep); __state_set(&ep->com, CLOSING); attrs.next_state = C4IW_QP_STATE_CLOSING; ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); if (ret != -ECONNRESET) { peer_close_upcall(ep); disconnect = 1; } break; case ABORTING: disconnect = 0; break; case CLOSING: __state_set(&ep->com, MORIBUND); disconnect = 0; break; case MORIBUND: stop_ep_timer(ep); if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } close_complete_upcall(ep); __state_set(&ep->com, DEAD); release = 1; disconnect = 0; break; case DEAD: disconnect = 0; break; default: BUG_ON(1); } mutex_unlock(&ep->com.mutex); if (disconnect) c4iw_ep_disconnect(ep, 0, GFP_KERNEL); if (release) release_ep_resources(ep); return 0; } /* * Returns whether an ABORT_REQ_RSS message is a negative advice. */ static int is_neg_adv_abort(unsigned int status) { return status == CPL_ERR_RTX_NEG_ADVICE || status == CPL_ERR_PERSIST_NEG_ADVICE; } static int c4iw_reconnect(struct c4iw_ep *ep) { struct rtable *rt; int err = 0; PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); init_timer(&ep->timer); /* * Allocate an active TID to initiate a TCP connection. */ ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); if (ep->atid == -1) { printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); err = -ENOMEM; goto fail2; } /* find a route */ rt = find_route(ep->com.dev, ep->com.cm_id->local_addr.sin_addr.s_addr, ep->com.cm_id->remote_addr.sin_addr.s_addr, ep->com.cm_id->local_addr.sin_port, ep->com.cm_id->remote_addr.sin_port, 0); if (!rt) { printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); err = -EHOSTUNREACH; goto fail3; } ep->dst = &rt->dst; err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr, ep->dst, ep->com.dev, false); if (err) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); goto fail4; } PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, ep->l2t->idx); state_set(&ep->com, CONNECTING); ep->tos = 0; /* send connect request to rnic */ err = send_connect(ep); if (!err) goto out; cxgb4_l2t_release(ep->l2t); fail4: dst_release(ep->dst); fail3: cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); fail2: /* * remember to send notification to upper layer. * We are in here so the upper layer is not aware that this is * re-connect attempt and so, upper layer is still waiting for * response of 1st connect request. */ connect_reply_upcall(ep, -ECONNRESET); c4iw_put_ep(&ep->com); out: return err; } static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_abort_req_rss *req = cplhdr(skb); struct c4iw_ep *ep; struct cpl_abort_rpl *rpl; struct sk_buff *rpl_skb; struct c4iw_qp_attributes attrs; int ret; int release = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(req); ep = lookup_tid(t, tid); if (is_neg_adv_abort(req->status)) { PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, ep->hwtid); return 0; } PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, ep->com.state); /* * Wake up any threads in rdma_init() or rdma_fini(). 
* However, this is not needed if com state is just * MPA_REQ_SENT */ if (ep->com.state != MPA_REQ_SENT) c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case CONNECTING: break; case MPA_REQ_WAIT: stop_ep_timer(ep); break; case MPA_REQ_SENT: stop_ep_timer(ep); if (mpa_rev == 2 && ep->tried_with_mpa_v1) connect_reply_upcall(ep, -ECONNRESET); else { /* * we just don't send notification upwards because we * want to retry with mpa_v1 without upper layers even * knowing it. * * do some housekeeping so as to re-initiate the * connection */ PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__, mpa_rev); ep->retry_with_mpa_v1 = 1; } break; case MPA_REP_SENT: break; case MPA_REQ_RCVD: break; case MORIBUND: case CLOSING: stop_ep_timer(ep); /*FALLTHROUGH*/ case FPDU_MODE: if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_ERROR; ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); if (ret) printk(KERN_ERR MOD "%s - qp <- error failed!\n", __func__); } peer_abort_upcall(ep); break; case ABORTING: break; case DEAD: PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); mutex_unlock(&ep->com.mutex); return 0; default: BUG_ON(1); break; } dst_confirm(ep->dst); if (ep->com.state != ABORTING) { __state_set(&ep->com, DEAD); /* we don't release if we want to retry with mpa_v1 */ if (!ep->retry_with_mpa_v1) release = 1; } mutex_unlock(&ep->com.mutex); rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); if (!rpl_skb) { printk(KERN_ERR MOD "%s - cannot allocate skb!\n", __func__); release = 1; goto out; } set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); INIT_TP_WR(rpl, ep->hwtid); OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); rpl->cmd = CPL_ABORT_NO_RST; c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); out: if (release) release_ep_resources(ep); /* retry with mpa-v1 */ if (ep && ep->retry_with_mpa_v1) { cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); c4iw_reconnect(ep); } return 0; } static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct c4iw_qp_attributes attrs; struct cpl_close_con_rpl *rpl = cplhdr(skb); int release = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(rpl); ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); BUG_ON(!ep); /* The cm_id may be null if we failed to connect */ mutex_lock(&ep->com.mutex); switch (ep->com.state) { case CLOSING: __state_set(&ep->com, MORIBUND); break; case MORIBUND: stop_ep_timer(ep); if ((ep->com.cm_id) && (ep->com.qp)) { attrs.next_state = C4IW_QP_STATE_IDLE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } close_complete_upcall(ep); __state_set(&ep->com, DEAD); release = 1; break; case ABORTING: case DEAD: break; default: BUG_ON(1); break; } mutex_unlock(&ep->com.mutex); if (release) release_ep_resources(ep); return 0; } static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_rdma_terminate *rpl = cplhdr(skb); struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(rpl); struct c4iw_ep *ep; struct c4iw_qp_attributes attrs; ep = lookup_tid(t, tid); BUG_ON(!ep); if (ep && ep->com.qp) { printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, ep->com.qp->wq.sq.qid); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } else printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); return 0; } /* * Upcall from the adapter indicating data has been transmitted. * For us its just the single MPA request or reply. We can now free * the skb holding the mpa message. */ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *ep; struct cpl_fw4_ack *hdr = cplhdr(skb); u8 credits = hdr->credits; unsigned int tid = GET_TID(hdr); struct tid_info *t = dev->rdev.lldi.tids; ep = lookup_tid(t, tid); PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); if (credits == 0) { PDBG("%s 0 credit ack ep %p tid %u state %u\n", __func__, ep, ep->hwtid, state_read(&ep->com)); return 0; } dst_confirm(ep->dst); if (ep->mpa_skb) { PDBG("%s last streaming msg ack ep %p tid %u state %u " "initiator %u freeing skb\n", __func__, ep, ep->hwtid, state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); kfree_skb(ep->mpa_skb); ep->mpa_skb = NULL; } return 0; } int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { int err; struct c4iw_ep *ep = to_ep(cm_id); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); if (state_read(&ep->com) == DEAD) { c4iw_put_ep(&ep->com); return -ECONNRESET; } BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); if (mpa_rev == 0) abort_connection(ep, NULL, GFP_KERNEL); else { err = send_mpa_reject(ep, pdata, pdata_len); err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); } c4iw_put_ep(&ep->com); return 0; } int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { int err; struct c4iw_qp_attributes attrs; enum c4iw_qp_attr_mask mask; struct c4iw_ep *ep = to_ep(cm_id); struct c4iw_dev *h = to_c4iw_dev(cm_id->device); struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); if (state_read(&ep->com) == DEAD) { err = -ECONNRESET; goto err; } BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); BUG_ON(!qp); if ((conn_param->ord > c4iw_max_read_depth) || (conn_param->ird > c4iw_max_read_depth)) { abort_connection(ep, NULL, GFP_KERNEL); err = -EINVAL; goto err; } if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { if (conn_param->ord > ep->ird) { ep->ird = conn_param->ird; ep->ord = conn_param->ord; send_mpa_reject(ep, conn_param->private_data, conn_param->private_data_len); abort_connection(ep, NULL, GFP_KERNEL); err = -ENOMEM; goto err; } if (conn_param->ird > ep->ord) { if (!ep->ord) conn_param->ird = 1; else { abort_connection(ep, NULL, GFP_KERNEL); err = -ENOMEM; goto err; } } } ep->ird = conn_param->ird; ep->ord = conn_param->ord; if (ep->mpa_attr.version != 2) if (peer2peer && ep->ird == 0) ep->ird = 1; PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); cm_id->add_ref(cm_id); ep->com.cm_id = cm_id; ep->com.qp = qp; /* bind QP to EP and move to RTS */ attrs.mpa_attr = ep->mpa_attr; attrs.max_ird = ep->ird; attrs.max_ord = ep->ord; attrs.llp_stream_handle = ep; attrs.next_state = C4IW_QP_STATE_RTS; /* bind QP and TID with INIT_WR */ mask = C4IW_QP_ATTR_NEXT_STATE | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); if (err) goto err1; err = send_mpa_reply(ep, conn_param->private_data, conn_param->private_data_len); if (err) goto err1; state_set(&ep->com, FPDU_MODE); established_upcall(ep); c4iw_put_ep(&ep->com); return 0; err1: ep->com.cm_id = NULL; ep->com.qp = NULL; cm_id->rem_ref(cm_id); err: 
c4iw_put_ep(&ep->com); return err; } int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) { struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_ep *ep; struct rtable *rt; int err = 0; if ((conn_param->ord > c4iw_max_read_depth) || (conn_param->ird > c4iw_max_read_depth)) { err = -EINVAL; goto out; } ep = alloc_ep(sizeof(*ep), GFP_KERNEL); if (!ep) { printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); err = -ENOMEM; goto out; } init_timer(&ep->timer); ep->plen = conn_param->private_data_len; if (ep->plen) memcpy(ep->mpa_pkt + sizeof(struct mpa_message), conn_param->private_data, ep->plen); ep->ird = conn_param->ird; ep->ord = conn_param->ord; if (peer2peer && ep->ord == 0) ep->ord = 1; cm_id->add_ref(cm_id); ep->com.dev = dev; ep->com.cm_id = cm_id; ep->com.qp = get_qhp(dev, conn_param->qpn); BUG_ON(!ep->com.qp); PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, ep->com.qp, cm_id); /* * Allocate an active TID to initiate a TCP connection. */ ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); if (ep->atid == -1) { printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); err = -ENOMEM; goto fail2; } PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, ntohl(cm_id->local_addr.sin_addr.s_addr), ntohs(cm_id->local_addr.sin_port), ntohl(cm_id->remote_addr.sin_addr.s_addr), ntohs(cm_id->remote_addr.sin_port)); /* find a route */ rt = find_route(dev, cm_id->local_addr.sin_addr.s_addr, cm_id->remote_addr.sin_addr.s_addr, cm_id->local_addr.sin_port, cm_id->remote_addr.sin_port, 0); if (!rt) { printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); err = -EHOSTUNREACH; goto fail3; } ep->dst = &rt->dst; err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr, ep->dst, ep->com.dev, true); if (err) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); goto fail4; } PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, ep->l2t->idx); state_set(&ep->com, CONNECTING); ep->tos = 0; ep->com.local_addr = cm_id->local_addr; ep->com.remote_addr = cm_id->remote_addr; /* send connect request to rnic */ err = send_connect(ep); if (!err) goto out; cxgb4_l2t_release(ep->l2t); fail4: dst_release(ep->dst); fail3: cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); fail2: cm_id->rem_ref(cm_id); c4iw_put_ep(&ep->com); out: return err; } int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) { int err = 0; struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_listen_ep *ep; might_sleep(); ep = alloc_ep(sizeof(*ep), GFP_KERNEL); if (!ep) { printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); err = -ENOMEM; goto fail1; } PDBG("%s ep %p\n", __func__, ep); cm_id->add_ref(cm_id); ep->com.cm_id = cm_id; ep->com.dev = dev; ep->backlog = backlog; ep->com.local_addr = cm_id->local_addr; /* * Allocate a server TID. 
*/ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); if (ep->stid == -1) { printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); err = -ENOMEM; goto fail2; } state_set(&ep->com, LISTEN); c4iw_init_wr_wait(&ep->com.wr_wait); err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, ep->com.local_addr.sin_addr.s_addr, ep->com.local_addr.sin_port, ep->com.dev->rdev.lldi.rxq_ids[0]); if (err) goto fail3; /* wait for pass_open_rpl */ err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, __func__); if (!err) { cm_id->provider_data = ep; goto out; } fail3: cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); fail2: cm_id->rem_ref(cm_id); c4iw_put_ep(&ep->com); fail1: out: return err; } int c4iw_destroy_listen(struct iw_cm_id *cm_id) { int err; struct c4iw_listen_ep *ep = to_listen_ep(cm_id); PDBG("%s ep %p\n", __func__, ep); might_sleep(); state_set(&ep->com, DEAD); c4iw_init_wr_wait(&ep->com.wr_wait); err = listen_stop(ep); if (err) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, __func__); cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); done: cm_id->rem_ref(cm_id); c4iw_put_ep(&ep->com); return err; } int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) { int ret = 0; int close = 0; int fatal = 0; struct c4iw_rdev *rdev; mutex_lock(&ep->com.mutex); PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, states[ep->com.state], abrupt); rdev = &ep->com.dev->rdev; if (c4iw_fatal_error(rdev)) { fatal = 1; close_complete_upcall(ep); ep->com.state = DEAD; } switch (ep->com.state) { case MPA_REQ_WAIT: case MPA_REQ_SENT: case MPA_REQ_RCVD: case MPA_REP_SENT: case FPDU_MODE: close = 1; if (abrupt) ep->com.state = ABORTING; else { ep->com.state = CLOSING; start_ep_timer(ep); } set_bit(CLOSE_SENT, &ep->com.flags); break; case CLOSING: if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { close = 1; if (abrupt) { stop_ep_timer(ep); ep->com.state = ABORTING; } else ep->com.state = MORIBUND; } break; case MORIBUND: case ABORTING: case DEAD: PDBG("%s ignoring disconnect ep %p state %u\n", __func__, ep, ep->com.state); break; default: BUG(); break; } if (close) { if (abrupt) { close_complete_upcall(ep); ret = send_abort(ep, NULL, gfp); } else ret = send_halfclose(ep, gfp); if (ret) fatal = 1; } mutex_unlock(&ep->com.mutex); if (fatal) release_ep_resources(ep); return ret; } static int async_event(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_fw6_msg *rpl = cplhdr(skb); c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); return 0; } /* * These are the real handlers that are called from a * work queue. 
*/ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = act_establish, [CPL_ACT_OPEN_RPL] = act_open_rpl, [CPL_RX_DATA] = rx_data, [CPL_ABORT_RPL_RSS] = abort_rpl, [CPL_ABORT_RPL] = abort_rpl, [CPL_PASS_OPEN_RPL] = pass_open_rpl, [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, [CPL_PASS_ACCEPT_REQ] = pass_accept_req, [CPL_PASS_ESTABLISH] = pass_establish, [CPL_PEER_CLOSE] = peer_close, [CPL_ABORT_REQ_RSS] = peer_abort, [CPL_CLOSE_CON_RPL] = close_con_rpl, [CPL_RDMA_TERMINATE] = terminate, [CPL_FW4_ACK] = fw4_ack, [CPL_FW6_MSG] = async_event }; static void process_timeout(struct c4iw_ep *ep) { struct c4iw_qp_attributes attrs; int abort = 1; mutex_lock(&ep->com.mutex); PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, ep->com.state); switch (ep->com.state) { case MPA_REQ_SENT: __state_set(&ep->com, ABORTING); connect_reply_upcall(ep, -ETIMEDOUT); break; case MPA_REQ_WAIT: __state_set(&ep->com, ABORTING); break; case CLOSING: case MORIBUND: if (ep->com.cm_id && ep->com.qp) { attrs.next_state = C4IW_QP_STATE_ERROR; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } __state_set(&ep->com, ABORTING); break; default: printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n", __func__, ep, ep->hwtid, ep->com.state); WARN_ON(1); abort = 0; } mutex_unlock(&ep->com.mutex); if (abort) abort_connection(ep, NULL, GFP_KERNEL); c4iw_put_ep(&ep->com); } static void process_timedout_eps(void) { struct c4iw_ep *ep; spin_lock_irq(&timeout_lock); while (!list_empty(&timeout_list)) { struct list_head *tmp; tmp = timeout_list.next; list_del(tmp); spin_unlock_irq(&timeout_lock); ep = list_entry(tmp, struct c4iw_ep, entry); process_timeout(ep); spin_lock_irq(&timeout_lock); } spin_unlock_irq(&timeout_lock); } static void process_work(struct work_struct *work) { struct sk_buff *skb = NULL; struct c4iw_dev *dev; struct cpl_act_establish *rpl; unsigned int opcode; int ret; while ((skb = skb_dequeue(&rxq))) { rpl = cplhdr(skb); dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); opcode = rpl->ot.opcode; BUG_ON(!work_handlers[opcode]); ret = work_handlers[opcode](dev, skb); if (!ret) kfree_skb(skb); } process_timedout_eps(); } static DECLARE_WORK(skb_work, process_work); static void ep_timeout(unsigned long arg) { struct c4iw_ep *ep = (struct c4iw_ep *)arg; spin_lock(&timeout_lock); list_add_tail(&ep->entry, &timeout_list); spin_unlock(&timeout_lock); queue_work(workq, &skb_work); } /* * All the CM events are handled on a work queue to have a safe context. */ static int sched(struct c4iw_dev *dev, struct sk_buff *skb) { /* * Save dev in the skb->cb area. */ *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; /* * Queue the skb and schedule the worker thread. */ skb_queue_tail(&rxq, skb); queue_work(workq, &skb_work); return 0; } static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_set_tcb_rpl *rpl = cplhdr(skb); if (rpl->status != CPL_ERR_NONE) { printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " "for tid %u\n", rpl->status, GET_TID(rpl)); } kfree_skb(skb); return 0; } static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_fw6_msg *rpl = cplhdr(skb); struct c4iw_wr_wait *wr_waitp; int ret; PDBG("%s type %u\n", __func__, rpl->type); switch (rpl->type) { case 1: ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); if (wr_waitp) c4iw_wake_up(wr_waitp, ret ? 
-ret : 0); kfree_skb(skb); break; case 2: sched(dev, skb); break; default: printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, rpl->type); kfree_skb(skb); break; } return 0; } static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) { struct cpl_abort_req_rss *req = cplhdr(skb); struct c4iw_ep *ep; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(req); ep = lookup_tid(t, tid); if (!ep) { printk(KERN_WARNING MOD "Abort on non-existent endpoint, tid %d\n", tid); kfree_skb(skb); return 0; } if (is_neg_adv_abort(req->status)) { PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, ep->hwtid); kfree_skb(skb); return 0; } PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, ep->com.state); /* * Wake up any threads in rdma_init() or rdma_fini(). */ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); sched(dev, skb); return 0; } /* * Most upcalls from the T4 Core go to sched() to * schedule the processing on a work queue. */ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = sched, [CPL_ACT_OPEN_RPL] = sched, [CPL_RX_DATA] = sched, [CPL_ABORT_RPL_RSS] = sched, [CPL_ABORT_RPL] = sched, [CPL_PASS_OPEN_RPL] = sched, [CPL_CLOSE_LISTSRV_RPL] = sched, [CPL_PASS_ACCEPT_REQ] = sched, [CPL_PASS_ESTABLISH] = sched, [CPL_PEER_CLOSE] = sched, [CPL_CLOSE_CON_RPL] = sched, [CPL_ABORT_REQ_RSS] = peer_abort_intr, [CPL_RDMA_TERMINATE] = sched, [CPL_FW4_ACK] = sched, [CPL_SET_TCB_RPL] = set_tcb_rpl, [CPL_FW6_MSG] = fw6_msg }; int __init c4iw_cm_init(void) { spin_lock_init(&timeout_lock); skb_queue_head_init(&rxq); workq = create_singlethread_workqueue("iw_cxgb4"); if (!workq) return -ENOMEM; return 0; } void __exit c4iw_cm_term(void) { WARN_ON(!list_empty(&timeout_list)); flush_workqueue(workq); destroy_workqueue(workq); }
gpl-2.0
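The connect_request_upcall() path in the record above strips the MPA v2 connection parameters out of the accumulated private data before handing it to the ULP, while MPA v1 passes the whole private-data region through. The sketch below isolates just that offset arithmetic; the struct layouts and the example lengths are simplified stand-ins paraphrased from the driver for illustration, not the authoritative cxgb4/iw_cm definitions.

/*
 * Illustrative sketch of where MPA private data sits inside an accumulated
 * MPA start message, mirroring the pointer math in connect_request_upcall().
 * Struct layouts and values below are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

struct mpa_message_sketch {
	uint8_t  key[16];
	uint8_t  flags;
	uint8_t  revision;
	uint16_t private_data_size;	/* big-endian on the wire */
	uint8_t  private_data[];
};

struct mpa_v2_conn_params_sketch {
	uint16_t ird;			/* low bits carry IRD, top bits are flags */
	uint16_t ord;			/* low bits carry ORD, top bits are flags */
};

int main(void)
{
	uint8_t mpa_pkt[64] = { 0 };	/* stands in for ep->mpa_pkt */
	uint8_t plen = 4 + 8;		/* v2 params + 8 bytes of ULP data (hypothetical) */
	int mpa_v2 = 1;			/* pretend the peer negotiated MPA v2 */
	const uint8_t *private_data;
	size_t private_data_len;

	if (mpa_v2) {
		/* v2: conn params ride at the front of the private data and are stripped */
		private_data_len = plen - sizeof(struct mpa_v2_conn_params_sketch);
		private_data = mpa_pkt + sizeof(struct mpa_message_sketch)
				+ sizeof(struct mpa_v2_conn_params_sketch);
	} else {
		/* v1: the whole private-data region belongs to the ULP */
		private_data_len = plen;
		private_data = mpa_pkt + sizeof(struct mpa_message_sketch);
	}

	printf("upcall gets %zu bytes at offset %td\n",
	       private_data_len, private_data - mpa_pkt);
	return 0;
}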
netico-solutions/linux-urtu-bb
drivers/w1/slaves/w1_ds2431.c
2040
7185
/* * w1_ds2431.c - w1 family 2d (DS2431) driver * * Copyright (c) 2008 Bernhard Weirich <bernhard.weirich@riedel.net> * * Heavily inspired by w1_DS2433 driver from Ben Gardner <bgardner@wabtec.com> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/types.h> #include <linux/delay.h> #include "../w1.h" #include "../w1_int.h" #include "../w1_family.h" #define W1_F2D_EEPROM_SIZE 128 #define W1_F2D_PAGE_COUNT 4 #define W1_F2D_PAGE_BITS 5 #define W1_F2D_PAGE_SIZE (1<<W1_F2D_PAGE_BITS) #define W1_F2D_PAGE_MASK 0x1F #define W1_F2D_SCRATCH_BITS 3 #define W1_F2D_SCRATCH_SIZE (1<<W1_F2D_SCRATCH_BITS) #define W1_F2D_SCRATCH_MASK (W1_F2D_SCRATCH_SIZE-1) #define W1_F2D_READ_EEPROM 0xF0 #define W1_F2D_WRITE_SCRATCH 0x0F #define W1_F2D_READ_SCRATCH 0xAA #define W1_F2D_COPY_SCRATCH 0x55 #define W1_F2D_TPROG_MS 11 #define W1_F2D_READ_RETRIES 10 #define W1_F2D_READ_MAXLEN 8 /* * Check the file size bounds and adjusts count as needed. * This would not be needed if the file size didn't reset to 0 after a write. */ static inline size_t w1_f2d_fix_count(loff_t off, size_t count, size_t size) { if (off > size) return 0; if ((off + count) > size) return size - off; return count; } /* * Read a block from W1 ROM two times and compares the results. * If they are equal they are returned, otherwise the read * is repeated W1_F2D_READ_RETRIES times. * * count must not exceed W1_F2D_READ_MAXLEN. */ static int w1_f2d_readblock(struct w1_slave *sl, int off, int count, char *buf) { u8 wrbuf[3]; u8 cmp[W1_F2D_READ_MAXLEN]; int tries = W1_F2D_READ_RETRIES; do { wrbuf[0] = W1_F2D_READ_EEPROM; wrbuf[1] = off & 0xff; wrbuf[2] = off >> 8; if (w1_reset_select_slave(sl)) return -1; w1_write_block(sl->master, wrbuf, 3); w1_read_block(sl->master, buf, count); if (w1_reset_select_slave(sl)) return -1; w1_write_block(sl->master, wrbuf, 3); w1_read_block(sl->master, cmp, count); if (!memcmp(cmp, buf, count)) return 0; } while (--tries); dev_err(&sl->dev, "proof reading failed %d times\n", W1_F2D_READ_RETRIES); return -1; } static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); int todo = count; count = w1_f2d_fix_count(off, count, W1_F2D_EEPROM_SIZE); if (count == 0) return 0; mutex_lock(&sl->master->bus_mutex); /* read directly from the EEPROM in chunks of W1_F2D_READ_MAXLEN */ while (todo > 0) { int block_read; if (todo >= W1_F2D_READ_MAXLEN) block_read = W1_F2D_READ_MAXLEN; else block_read = todo; if (w1_f2d_readblock(sl, off, block_read, buf) < 0) count = -EIO; todo -= W1_F2D_READ_MAXLEN; buf += W1_F2D_READ_MAXLEN; off += W1_F2D_READ_MAXLEN; } mutex_unlock(&sl->master->bus_mutex); return count; } /* * Writes to the scratchpad and reads it back for verification. * Then copies the scratchpad to EEPROM. * The data must be aligned at W1_F2D_SCRATCH_SIZE bytes and * must be W1_F2D_SCRATCH_SIZE bytes long. * The master must be locked. 
* * @param sl The slave structure * @param addr Address for the write * @param len length must be <= (W1_F2D_PAGE_SIZE - (addr & W1_F2D_PAGE_MASK)) * @param data The data to write * @return 0=Success -1=failure */ static int w1_f2d_write(struct w1_slave *sl, int addr, int len, const u8 *data) { int tries = W1_F2D_READ_RETRIES; u8 wrbuf[4]; u8 rdbuf[W1_F2D_SCRATCH_SIZE + 3]; u8 es = (addr + len - 1) % W1_F2D_SCRATCH_SIZE; retry: /* Write the data to the scratchpad */ if (w1_reset_select_slave(sl)) return -1; wrbuf[0] = W1_F2D_WRITE_SCRATCH; wrbuf[1] = addr & 0xff; wrbuf[2] = addr >> 8; w1_write_block(sl->master, wrbuf, 3); w1_write_block(sl->master, data, len); /* Read the scratchpad and verify */ if (w1_reset_select_slave(sl)) return -1; w1_write_8(sl->master, W1_F2D_READ_SCRATCH); w1_read_block(sl->master, rdbuf, len + 3); /* Compare what was read against the data written */ if ((rdbuf[0] != wrbuf[1]) || (rdbuf[1] != wrbuf[2]) || (rdbuf[2] != es) || (memcmp(data, &rdbuf[3], len) != 0)) { if (--tries) goto retry; dev_err(&sl->dev, "could not write to eeprom, scratchpad compare failed %d times\n", W1_F2D_READ_RETRIES); return -1; } /* Copy the scratchpad to EEPROM */ if (w1_reset_select_slave(sl)) return -1; wrbuf[0] = W1_F2D_COPY_SCRATCH; wrbuf[3] = es; w1_write_block(sl->master, wrbuf, 4); /* Sleep for tprog ms to wait for the write to complete */ msleep(W1_F2D_TPROG_MS); /* Reset the bus to wake up the EEPROM */ w1_reset_bus(sl->master); return 0; } static ssize_t eeprom_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); int addr, len; int copy; count = w1_f2d_fix_count(off, count, W1_F2D_EEPROM_SIZE); if (count == 0) return 0; mutex_lock(&sl->master->bus_mutex); /* Can only write data in blocks of the size of the scratchpad */ addr = off; len = count; while (len > 0) { /* if len too short or addr not aligned */ if (len < W1_F2D_SCRATCH_SIZE || addr & W1_F2D_SCRATCH_MASK) { char tmp[W1_F2D_SCRATCH_SIZE]; /* read the block and update the parts to be written */ if (w1_f2d_readblock(sl, addr & ~W1_F2D_SCRATCH_MASK, W1_F2D_SCRATCH_SIZE, tmp)) { count = -EIO; goto out_up; } /* copy at most to the boundary of the PAGE or len */ copy = W1_F2D_SCRATCH_SIZE - (addr & W1_F2D_SCRATCH_MASK); if (copy > len) copy = len; memcpy(&tmp[addr & W1_F2D_SCRATCH_MASK], buf, copy); if (w1_f2d_write(sl, addr & ~W1_F2D_SCRATCH_MASK, W1_F2D_SCRATCH_SIZE, tmp) < 0) { count = -EIO; goto out_up; } } else { copy = W1_F2D_SCRATCH_SIZE; if (w1_f2d_write(sl, addr, copy, buf) < 0) { count = -EIO; goto out_up; } } buf += copy; addr += copy; len -= copy; } out_up: mutex_unlock(&sl->master->bus_mutex); return count; } static BIN_ATTR_RW(eeprom, W1_F2D_EEPROM_SIZE); static struct bin_attribute *w1_f2d_bin_attrs[] = { &bin_attr_eeprom, NULL, }; static const struct attribute_group w1_f2d_group = { .bin_attrs = w1_f2d_bin_attrs, }; static const struct attribute_group *w1_f2d_groups[] = { &w1_f2d_group, NULL, }; static struct w1_family_ops w1_f2d_fops = { .groups = w1_f2d_groups, }; static struct w1_family w1_family_2d = { .fid = W1_EEPROM_DS2431, .fops = &w1_f2d_fops, }; static int __init w1_f2d_init(void) { return w1_register_family(&w1_family_2d); } static void __exit w1_f2d_fini(void) { w1_unregister_family(&w1_family_2d); } module_init(w1_f2d_init); module_exit(w1_f2d_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>"); MODULE_DESCRIPTION("w1 family 2d driver for DS2431, 
1kb EEPROM"); MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2431));
gpl-2.0
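w1_ds2431.c above exposes the 128-byte EEPROM as a binary sysfs attribute, so userspace never speaks the scratchpad protocol itself; it simply reads or writes the eeprom file and the driver handles W1_F2D_SCRATCH_SIZE alignment and retries internally. A minimal read sketch follows; the slave directory name is a placeholder, since real devices enumerate as /sys/bus/w1/devices/2d-<serial>/.

/*
 * Minimal sketch: read the DS2431 EEPROM through the sysfs attribute
 * created by w1_ds2431.c. The device path below is a placeholder.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define DS2431_EEPROM_SIZE 128	/* matches W1_F2D_EEPROM_SIZE */

int main(void)
{
	/* hypothetical slave id - substitute the one enumerated on your bus */
	const char *path = "/sys/bus/w1/devices/2d-000000000000/eeprom";
	unsigned char buf[DS2431_EEPROM_SIZE];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* the driver serves this in W1_F2D_READ_MAXLEN-sized bus transactions */
	n = read(fd, buf, sizeof(buf));
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes, first byte 0x%02x\n", n, buf[0]);

	close(fd);
	return n < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}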
hroark13/hydro_kernel
drivers/pci/hotplug/pciehp_ctrl.c
2808
14889
/* * PCI Express Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/pci.h> #include "../pci.h" #include "pciehp.h" static void interrupt_event_handler(struct work_struct *work); static int queue_interrupt_event(struct slot *p_slot, u32 event_type) { struct event_info *info; info = kmalloc(sizeof(*info), GFP_ATOMIC); if (!info) return -ENOMEM; info->event_type = event_type; info->p_slot = p_slot; INIT_WORK(&info->work, interrupt_event_handler); queue_work(pciehp_wq, &info->work); return 0; } u8 pciehp_handle_attention_button(struct slot *p_slot) { u32 event_type; struct controller *ctrl = p_slot->ctrl; /* Attention Button Change */ ctrl_dbg(ctrl, "Attention button interrupt received\n"); /* * Button pressed - See if need to TAKE ACTION!!! 
*/ ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot)); event_type = INT_BUTTON_PRESS; queue_interrupt_event(p_slot, event_type); return 0; } u8 pciehp_handle_switch_change(struct slot *p_slot) { u8 getstatus; u32 event_type; struct controller *ctrl = p_slot->ctrl; /* Switch Change */ ctrl_dbg(ctrl, "Switch interrupt received\n"); pciehp_get_latch_status(p_slot, &getstatus); if (getstatus) { /* * Switch opened */ ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_OPEN; } else { /* * Switch closed */ ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_CLOSE; } queue_interrupt_event(p_slot, event_type); return 1; } u8 pciehp_handle_presence_change(struct slot *p_slot) { u32 event_type; u8 presence_save; struct controller *ctrl = p_slot->ctrl; /* Presence Change */ ctrl_dbg(ctrl, "Presence/Notify input change\n"); /* Switch is open, assume a presence change * Save the presence state */ pciehp_get_adapter_status(p_slot, &presence_save); if (presence_save) { /* * Card Present */ ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot)); event_type = INT_PRESENCE_ON; } else { /* * Not Present */ ctrl_info(ctrl, "Card not present on Slot(%s)\n", slot_name(p_slot)); event_type = INT_PRESENCE_OFF; } queue_interrupt_event(p_slot, event_type); return 1; } u8 pciehp_handle_power_fault(struct slot *p_slot) { u32 event_type; struct controller *ctrl = p_slot->ctrl; /* power fault */ ctrl_dbg(ctrl, "Power fault interrupt received\n"); ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); event_type = INT_POWER_FAULT; ctrl_info(ctrl, "Power fault bit %x set\n", 0); queue_interrupt_event(p_slot, event_type); return 1; } /* The following routines constitute the bulk of the hotplug controller logic */ static void set_slot_off(struct controller *ctrl, struct slot * pslot) { /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ if (POWER_CTRL(ctrl)) { if (pciehp_power_off_slot(pslot)) { ctrl_err(ctrl, "Issue of Slot Power Off command failed\n"); return; } /* * After turning power off, we must wait for at least 1 second * before taking any action that relies on power having been * removed from the slot/adapter. */ msleep(1000); } if (PWR_LED(ctrl)) pciehp_green_led_off(pslot); if (ATTN_LED(ctrl)) { if (pciehp_set_attention_status(pslot, 1)) { ctrl_err(ctrl, "Issue of Set Attention Led command failed\n"); return; } } } /** * board_added - Called after a board has been added to the system. * @p_slot: &slot where board is added * * Turns power on for the board. * Configures board. 
*/ static int board_added(struct slot *p_slot) { int retval = 0; struct controller *ctrl = p_slot->ctrl; struct pci_bus *parent = ctrl->pcie->port->subordinate; if (POWER_CTRL(ctrl)) { /* Power on slot */ retval = pciehp_power_on_slot(p_slot); if (retval) return retval; } if (PWR_LED(ctrl)) pciehp_green_led_blink(p_slot); /* Check link training status */ retval = pciehp_check_link_status(ctrl); if (retval) { ctrl_err(ctrl, "Failed to check link status\n"); goto err_exit; } /* Check for a power fault */ if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) { ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot)); retval = -EIO; goto err_exit; } retval = pciehp_configure_device(p_slot); if (retval) { ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n", pci_domain_nr(parent), parent->number); goto err_exit; } if (PWR_LED(ctrl)) pciehp_green_led_on(p_slot); return 0; err_exit: set_slot_off(ctrl, p_slot); return retval; } /** * remove_board - Turns off slot and LEDs * @p_slot: slot where board is being removed */ static int remove_board(struct slot *p_slot) { int retval = 0; struct controller *ctrl = p_slot->ctrl; retval = pciehp_unconfigure_device(p_slot); if (retval) return retval; if (POWER_CTRL(ctrl)) { /* power off slot */ retval = pciehp_power_off_slot(p_slot); if (retval) { ctrl_err(ctrl, "Issue of Slot Disable command failed\n"); return retval; } /* * After turning power off, we must wait for at least 1 second * before taking any action that relies on power having been * removed from the slot/adapter. */ msleep(1000); } /* turn off Green LED */ if (PWR_LED(ctrl)) pciehp_green_led_off(p_slot); return 0; } struct power_work_info { struct slot *p_slot; struct work_struct work; }; /** * pciehp_power_thread - handle pushbutton events * @work: &struct work_struct describing work to be done * * Scheduled procedure to handle blocking stuff for the pushbuttons. * Handles all pending events and exits. 
*/ static void pciehp_power_thread(struct work_struct *work) { struct power_work_info *info = container_of(work, struct power_work_info, work); struct slot *p_slot = info->p_slot; mutex_lock(&p_slot->lock); switch (p_slot->state) { case POWEROFF_STATE: mutex_unlock(&p_slot->lock); ctrl_dbg(p_slot->ctrl, "Disabling domain:bus:device=%04x:%02x:00\n", pci_domain_nr(p_slot->ctrl->pcie->port->subordinate), p_slot->ctrl->pcie->port->subordinate->number); pciehp_disable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWERON_STATE: mutex_unlock(&p_slot->lock); if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl)) pciehp_green_led_off(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; default: break; } mutex_unlock(&p_slot->lock); kfree(info); } void pciehp_queue_pushbutton_work(struct work_struct *work) { struct slot *p_slot = container_of(work, struct slot, work.work); struct power_work_info *info; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", __func__); return; } info->p_slot = p_slot; INIT_WORK(&info->work, pciehp_power_thread); mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGOFF_STATE: p_slot->state = POWEROFF_STATE; break; case BLINKINGON_STATE: p_slot->state = POWERON_STATE; break; default: kfree(info); goto out; } queue_work(pciehp_ordered_wq, &info->work); out: mutex_unlock(&p_slot->lock); } /* * Note: This function must be called with slot->lock held */ static void handle_button_press_event(struct slot *p_slot) { struct controller *ctrl = p_slot->ctrl; u8 getstatus; switch (p_slot->state) { case STATIC_STATE: pciehp_get_power_status(p_slot, &getstatus); if (getstatus) { p_slot->state = BLINKINGOFF_STATE; ctrl_info(ctrl, "PCI slot #%s - powering off due to button " "press.\n", slot_name(p_slot)); } else { p_slot->state = BLINKINGON_STATE; ctrl_info(ctrl, "PCI slot #%s - powering on due to button " "press.\n", slot_name(p_slot)); } /* blink green LED and turn off amber */ if (PWR_LED(ctrl)) pciehp_green_led_blink(p_slot); if (ATTN_LED(ctrl)) pciehp_set_attention_status(p_slot, 0); queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ); break; case BLINKINGOFF_STATE: case BLINKINGON_STATE: /* * Cancel if we are still blinking; this means that we * press the attention again before the 5 sec. 
limit * expires to cancel hot-add or hot-remove */ ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot)); cancel_delayed_work(&p_slot->work); if (p_slot->state == BLINKINGOFF_STATE) { if (PWR_LED(ctrl)) pciehp_green_led_on(p_slot); } else { if (PWR_LED(ctrl)) pciehp_green_led_off(p_slot); } if (ATTN_LED(ctrl)) pciehp_set_attention_status(p_slot, 0); ctrl_info(ctrl, "PCI slot #%s - action canceled " "due to button press\n", slot_name(p_slot)); p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: case POWERON_STATE: /* * Ignore if the slot is on power-on or power-off state; * this means that the previous attention button action * to hot-add or hot-remove is undergoing */ ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot)); break; default: ctrl_warn(ctrl, "Not a valid state\n"); break; } } /* * Note: This function must be called with slot->lock held */ static void handle_surprise_event(struct slot *p_slot) { u8 getstatus; struct power_work_info *info; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", __func__); return; } info->p_slot = p_slot; INIT_WORK(&info->work, pciehp_power_thread); pciehp_get_adapter_status(p_slot, &getstatus); if (!getstatus) p_slot->state = POWEROFF_STATE; else p_slot->state = POWERON_STATE; queue_work(pciehp_ordered_wq, &info->work); } static void interrupt_event_handler(struct work_struct *work) { struct event_info *info = container_of(work, struct event_info, work); struct slot *p_slot = info->p_slot; struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (info->event_type) { case INT_BUTTON_PRESS: handle_button_press_event(p_slot); break; case INT_POWER_FAULT: if (!POWER_CTRL(ctrl)) break; if (ATTN_LED(ctrl)) pciehp_set_attention_status(p_slot, 1); if (PWR_LED(ctrl)) pciehp_green_led_off(p_slot); break; case INT_PRESENCE_ON: case INT_PRESENCE_OFF: if (!HP_SUPR_RM(ctrl)) break; ctrl_dbg(ctrl, "Surprise Removal\n"); handle_surprise_event(p_slot); break; default: break; } mutex_unlock(&p_slot->lock); kfree(info); } int pciehp_enable_slot(struct slot *p_slot) { u8 getstatus = 0; int rc; struct controller *ctrl = p_slot->ctrl; rc = pciehp_get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); return -ENODEV; } if (MRL_SENS(p_slot->ctrl)) { rc = pciehp_get_latch_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); return -ENODEV; } } if (POWER_CTRL(p_slot->ctrl)) { rc = pciehp_get_power_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Already enabled on slot(%s)\n", slot_name(p_slot)); return -EINVAL; } } pciehp_get_latch_status(p_slot, &getstatus); rc = board_added(p_slot); if (rc) { pciehp_get_latch_status(p_slot, &getstatus); } return rc; } int pciehp_disable_slot(struct slot *p_slot) { u8 getstatus = 0; int ret = 0; struct controller *ctrl = p_slot->ctrl; if (!p_slot->ctrl) return 1; if (!HP_SUPR_RM(p_slot->ctrl)) { ret = pciehp_get_adapter_status(p_slot, &getstatus); if (ret || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); return -ENODEV; } } if (MRL_SENS(p_slot->ctrl)) { ret = pciehp_get_latch_status(p_slot, &getstatus); if (ret || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); return -ENODEV; } } if (POWER_CTRL(p_slot->ctrl)) { ret = pciehp_get_power_status(p_slot, &getstatus); if (ret || !getstatus) { ctrl_info(ctrl, "Already disabled on 
slot(%s)\n", slot_name(p_slot)); return -EINVAL; } } return remove_board(p_slot); } int pciehp_sysfs_enable_slot(struct slot *p_slot) { int retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGON_STATE: cancel_delayed_work(&p_slot->work); case STATIC_STATE: p_slot->state = POWERON_STATE; mutex_unlock(&p_slot->lock); retval = pciehp_enable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWERON_STATE: ctrl_info(ctrl, "Slot %s is already in powering on state\n", slot_name(p_slot)); break; case BLINKINGOFF_STATE: case POWEROFF_STATE: ctrl_info(ctrl, "Already enabled on slot %s\n", slot_name(p_slot)); break; default: ctrl_err(ctrl, "Not a valid state on slot %s\n", slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); return retval; } int pciehp_sysfs_disable_slot(struct slot *p_slot) { int retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGOFF_STATE: cancel_delayed_work(&p_slot->work); case STATIC_STATE: p_slot->state = POWEROFF_STATE; mutex_unlock(&p_slot->lock); retval = pciehp_disable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: ctrl_info(ctrl, "Slot %s is already in powering off state\n", slot_name(p_slot)); break; case BLINKINGON_STATE: case POWERON_STATE: ctrl_info(ctrl, "Already disabled on slot %s\n", slot_name(p_slot)); break; default: ctrl_err(ctrl, "Not a valid state on slot %s\n", slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); return retval; }
gpl-2.0
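The pciehp fragment above drives hot-plug through a small per-slot state machine (static, blinking-on/off, powering-on/off) in which a second attention-button press cancels a pending hot-add or hot-remove. The following standalone sketch models only that button-cancel transition; the enum values and helper names are invented for illustration and are not the kernel's.

/* Minimal userspace model of the pciehp button-cancel transition.
 * All names here are illustrative; they mirror, but are not, the kernel code. */
#include <stdio.h>

enum slot_state { STATIC_ST, BLINKING_ON, BLINKING_OFF, POWER_ON, POWER_OFF };

/* A second button press while the slot is blinking cancels the pending
 * hot-add or hot-remove and returns the slot to its steady state. */
static enum slot_state button_press(enum slot_state cur)
{
	switch (cur) {
	case STATIC_ST:
		/* first press: arm the delayed work, start blinking */
		return BLINKING_ON;
	case BLINKING_ON:
	case BLINKING_OFF:
		/* second press: cancel the delayed work, restore the LED */
		printf("button cancel\n");
		return STATIC_ST;
	case POWER_ON:
	case POWER_OFF:
		/* operation already in progress: ignore the press */
		printf("button ignored\n");
		return cur;
	}
	return cur;
}

int main(void)
{
	enum slot_state s = STATIC_ST;
	s = button_press(s);	/* arms the operation */
	s = button_press(s);	/* cancels it again   */
	return s == STATIC_ST ? 0 : 1;
}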
santod/google_kernel_m7_3.4.10-g1a25406
drivers/media/dvb/ngene/ngene-cards.c
4856
16405
/* * ngene-cards.c: nGene PCIe bridge driver - card specific info * * Copyright (C) 2005-2007 Micronas * * Copyright (C) 2008-2009 Ralph Metzler <rjkm@metzlerbros.de> * Modifications for new nGene firmware, * support for EEPROM-copying, * support for new dual DVB-S2 card prototype * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 only, as published by the Free Software Foundation. * * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA * Or, point your browser to http://www.gnu.org/copyleft/gpl.html */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include "ngene.h" /* demods/tuners */ #include "stv6110x.h" #include "stv090x.h" #include "lnbh24.h" #include "lgdt330x.h" #include "mt2131.h" #include "tda18271c2dd.h" #include "drxk.h" /****************************************************************************/ /* Demod/tuner attachment ***************************************************/ /****************************************************************************/ static int tuner_attach_stv6110(struct ngene_channel *chan) { struct i2c_adapter *i2c; struct stv090x_config *feconf = (struct stv090x_config *) chan->dev->card_info->fe_config[chan->number]; struct stv6110x_config *tunerconf = (struct stv6110x_config *) chan->dev->card_info->tuner_config[chan->number]; struct stv6110x_devctl *ctl; /* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */ if (chan->number < 2) i2c = &chan->dev->channel[0].i2c_adapter; else i2c = &chan->dev->channel[1].i2c_adapter; ctl = dvb_attach(stv6110x_attach, chan->fe, tunerconf, i2c); if (ctl == NULL) { printk(KERN_ERR DEVICE_NAME ": No STV6110X found!\n"); return -ENODEV; } feconf->tuner_init = ctl->tuner_init; feconf->tuner_sleep = ctl->tuner_sleep; feconf->tuner_set_mode = ctl->tuner_set_mode; feconf->tuner_set_frequency = ctl->tuner_set_frequency; feconf->tuner_get_frequency = ctl->tuner_get_frequency; feconf->tuner_set_bandwidth = ctl->tuner_set_bandwidth; feconf->tuner_get_bandwidth = ctl->tuner_get_bandwidth; feconf->tuner_set_bbgain = ctl->tuner_set_bbgain; feconf->tuner_get_bbgain = ctl->tuner_get_bbgain; feconf->tuner_set_refclk = ctl->tuner_set_refclk; feconf->tuner_get_status = ctl->tuner_get_status; return 0; } static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable) { struct ngene_channel *chan = fe->sec_priv; int status; if (enable) { down(&chan->dev->pll_mutex); status = chan->gate_ctrl(fe, 1); } else { status = chan->gate_ctrl(fe, 0); up(&chan->dev->pll_mutex); } return status; } static int tuner_attach_tda18271(struct ngene_channel *chan) { struct i2c_adapter *i2c; struct dvb_frontend *fe; i2c = &chan->dev->channel[0].i2c_adapter; if (chan->fe->ops.i2c_gate_ctrl) chan->fe->ops.i2c_gate_ctrl(chan->fe, 1); fe = dvb_attach(tda18271c2dd_attach, chan->fe, i2c, 0x60); if (chan->fe->ops.i2c_gate_ctrl) chan->fe->ops.i2c_gate_ctrl(chan->fe, 0); if (!fe) { printk(KERN_ERR "No TDA18271 found!\n"); return -ENODEV; } return 0; } static int tuner_attach_probe(struct ngene_channel *chan) { if 
(chan->demod_type == 0) return tuner_attach_stv6110(chan); if (chan->demod_type == 1) return tuner_attach_tda18271(chan); return -EINVAL; } static int demod_attach_stv0900(struct ngene_channel *chan) { struct i2c_adapter *i2c; struct stv090x_config *feconf = (struct stv090x_config *) chan->dev->card_info->fe_config[chan->number]; /* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */ /* Note: Both adapters share the same i2c bus, but the demod */ /* driver requires that each demod has its own i2c adapter */ if (chan->number < 2) i2c = &chan->dev->channel[0].i2c_adapter; else i2c = &chan->dev->channel[1].i2c_adapter; chan->fe = dvb_attach(stv090x_attach, feconf, i2c, (chan->number & 1) == 0 ? STV090x_DEMODULATOR_0 : STV090x_DEMODULATOR_1); if (chan->fe == NULL) { printk(KERN_ERR DEVICE_NAME ": No STV0900 found!\n"); return -ENODEV; } /* store channel info */ if (feconf->tuner_i2c_lock) chan->fe->analog_demod_priv = chan; if (!dvb_attach(lnbh24_attach, chan->fe, i2c, 0, 0, chan->dev->card_info->lnb[chan->number])) { printk(KERN_ERR DEVICE_NAME ": No LNBH24 found!\n"); dvb_frontend_detach(chan->fe); chan->fe = NULL; return -ENODEV; } return 0; } static void cineS2_tuner_i2c_lock(struct dvb_frontend *fe, int lock) { struct ngene_channel *chan = fe->analog_demod_priv; if (lock) down(&chan->dev->pll_mutex); else up(&chan->dev->pll_mutex); } static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val) { struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD, .buf = val, .len = 1 } }; return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1; } static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr, u16 reg, u8 *val) { u8 msg[2] = {reg>>8, reg&0xff}; struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, .buf = msg, .len = 2}, {.addr = adr, .flags = I2C_M_RD, .buf = val, .len = 1} }; return (i2c_transfer(adapter, msgs, 2) == 2) ? 
0 : -1; } static int port_has_stv0900(struct i2c_adapter *i2c, int port) { u8 val; if (i2c_read_reg16(i2c, 0x68+port/2, 0xf100, &val) < 0) return 0; return 1; } static int port_has_drxk(struct i2c_adapter *i2c, int port) { u8 val; if (i2c_read(i2c, 0x29+port, &val) < 0) return 0; return 1; } static int demod_attach_drxk(struct ngene_channel *chan, struct i2c_adapter *i2c) { struct drxk_config config; memset(&config, 0, sizeof(config)); config.microcode_name = "drxk_a3.mc"; config.adr = 0x29 + (chan->number ^ 2); chan->fe = dvb_attach(drxk_attach, &config, i2c); if (!chan->fe) { printk(KERN_ERR "No DRXK found!\n"); return -ENODEV; } chan->fe->sec_priv = chan; chan->gate_ctrl = chan->fe->ops.i2c_gate_ctrl; chan->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl; return 0; } static int cineS2_probe(struct ngene_channel *chan) { struct i2c_adapter *i2c; struct stv090x_config *fe_conf; u8 buf[3]; struct i2c_msg i2c_msg = { .flags = 0, .buf = buf }; int rc; /* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */ if (chan->number < 2) i2c = &chan->dev->channel[0].i2c_adapter; else i2c = &chan->dev->channel[1].i2c_adapter; if (port_has_stv0900(i2c, chan->number)) { chan->demod_type = 0; fe_conf = chan->dev->card_info->fe_config[chan->number]; /* demod found, attach it */ rc = demod_attach_stv0900(chan); if (rc < 0 || chan->number < 2) return rc; /* demod #2: reprogram outputs DPN1 & DPN2 */ i2c_msg.addr = fe_conf->address; i2c_msg.len = 3; buf[0] = 0xf1; switch (chan->number) { case 2: buf[1] = 0x5c; buf[2] = 0xc2; break; case 3: buf[1] = 0x61; buf[2] = 0xcc; break; default: return -ENODEV; } rc = i2c_transfer(i2c, &i2c_msg, 1); if (rc != 1) { printk(KERN_ERR DEVICE_NAME ": could not setup DPNx\n"); return -EIO; } } else if (port_has_drxk(i2c, chan->number^2)) { chan->demod_type = 1; demod_attach_drxk(chan, i2c); } else { printk(KERN_ERR "No demod found on chan %d\n", chan->number); return -ENODEV; } return 0; } static struct lgdt330x_config aver_m780 = { .demod_address = 0xb2 >> 1, .demod_chip = LGDT3303, .serial_mpeg = 0x00, /* PARALLEL */ .clock_polarity_flip = 1, }; static struct mt2131_config m780_tunerconfig = { 0xc0 >> 1 }; /* A single func to attach the demo and tuner, rather than * use two sep funcs like the current design mandates. */ static int demod_attach_lg330x(struct ngene_channel *chan) { chan->fe = dvb_attach(lgdt330x_attach, &aver_m780, &chan->i2c_adapter); if (chan->fe == NULL) { printk(KERN_ERR DEVICE_NAME ": No LGDT330x found!\n"); return -ENODEV; } dvb_attach(mt2131_attach, chan->fe, &chan->i2c_adapter, &m780_tunerconfig, 0); return (chan->fe) ? 0 : -ENODEV; } /****************************************************************************/ /* Switch control (I2C gates, etc.) 
*****************************************/ /****************************************************************************/ static struct stv090x_config fe_cineS2 = { .device = STV0900, .demod_mode = STV090x_DUAL, .clk_mode = STV090x_CLK_EXT, .xtal = 27000000, .address = 0x68, .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED, .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED, .repeater_level = STV090x_RPTLEVEL_16, .adc1_range = STV090x_ADC_1Vpp, .adc2_range = STV090x_ADC_1Vpp, .diseqc_envelope_mode = true, .tuner_i2c_lock = cineS2_tuner_i2c_lock, }; static struct stv090x_config fe_cineS2_2 = { .device = STV0900, .demod_mode = STV090x_DUAL, .clk_mode = STV090x_CLK_EXT, .xtal = 27000000, .address = 0x69, .ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED, .ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED, .repeater_level = STV090x_RPTLEVEL_16, .adc1_range = STV090x_ADC_1Vpp, .adc2_range = STV090x_ADC_1Vpp, .diseqc_envelope_mode = true, .tuner_i2c_lock = cineS2_tuner_i2c_lock, }; static struct stv6110x_config tuner_cineS2_0 = { .addr = 0x60, .refclk = 27000000, .clk_div = 1, }; static struct stv6110x_config tuner_cineS2_1 = { .addr = 0x63, .refclk = 27000000, .clk_div = 1, }; static struct ngene_info ngene_info_cineS2 = { .type = NGENE_SIDEWINDER, .name = "Linux4Media cineS2 DVB-S2 Twin Tuner", .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN}, .demod_attach = {demod_attach_stv0900, demod_attach_stv0900}, .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110}, .fe_config = {&fe_cineS2, &fe_cineS2}, .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1}, .lnb = {0x0b, 0x08}, .tsf = {3, 3}, .fw_version = 18, .msi_supported = true, }; static struct ngene_info ngene_info_satixS2 = { .type = NGENE_SIDEWINDER, .name = "Mystique SaTiX-S2 Dual", .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN}, .demod_attach = {demod_attach_stv0900, demod_attach_stv0900}, .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110}, .fe_config = {&fe_cineS2, &fe_cineS2}, .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1}, .lnb = {0x0b, 0x08}, .tsf = {3, 3}, .fw_version = 18, .msi_supported = true, }; static struct ngene_info ngene_info_satixS2v2 = { .type = NGENE_SIDEWINDER, .name = "Mystique SaTiX-S2 Dual (v2)", .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSOUT}, .demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe}, .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe}, .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2}, .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1}, .lnb = {0x0a, 0x08, 0x0b, 0x09}, .tsf = {3, 3}, .fw_version = 18, .msi_supported = true, }; static struct ngene_info ngene_info_cineS2v5 = { .type = NGENE_SIDEWINDER, .name = "Linux4Media cineS2 DVB-S2 Twin Tuner (v5)", .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSOUT}, .demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe}, .tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe}, .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2}, .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1}, .lnb = {0x0a, 0x08, 0x0b, 0x09}, .tsf = {3, 3}, .fw_version = 18, .msi_supported = true, }; static struct ngene_info ngene_info_duoFlex = { .type = NGENE_SIDEWINDER, .name = "Digital Devices DuoFlex PCIe or miniPCIe", .io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, 
NGENE_IO_TSIN, NGENE_IO_TSOUT}, .demod_attach = {cineS2_probe, cineS2_probe, cineS2_probe, cineS2_probe}, .tuner_attach = {tuner_attach_probe, tuner_attach_probe, tuner_attach_probe, tuner_attach_probe}, .fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2}, .tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1}, .lnb = {0x0a, 0x08, 0x0b, 0x09}, .tsf = {3, 3}, .fw_version = 18, .msi_supported = true, }; static struct ngene_info ngene_info_m780 = { .type = NGENE_APP, .name = "Aver M780 ATSC/QAM-B", /* Channel 0 is analog, which is currently unsupported */ .io_type = { NGENE_IO_NONE, NGENE_IO_TSIN }, .demod_attach = { NULL, demod_attach_lg330x }, /* Ensure these are NULL else the frame will call them (as funcs) */ .tuner_attach = { 0, 0, 0, 0 }, .fe_config = { NULL, &aver_m780 }, .avf = { 0 }, /* A custom electrical interface config for the demod to bridge */ .tsf = { 4, 4 }, .fw_version = 15, }; /****************************************************************************/ /****************************************************************************/ /* PCI Subsystem ID *********************************************************/ /****************************************************************************/ #define NGENE_ID(_subvend, _subdev, _driverdata) { \ .vendor = NGENE_VID, .device = NGENE_PID, \ .subvendor = _subvend, .subdevice = _subdev, \ .driver_data = (unsigned long) &_driverdata } /****************************************************************************/ static const struct pci_device_id ngene_id_tbl[] __devinitdata = { NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2), NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2), NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2), NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2), NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5), NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlex), NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlex), NGENE_ID(0x1461, 0x062e, ngene_info_m780), {0} }; MODULE_DEVICE_TABLE(pci, ngene_id_tbl); /****************************************************************************/ /* Init/Exit ****************************************************************/ /****************************************************************************/ static pci_ers_result_t ngene_error_detected(struct pci_dev *dev, enum pci_channel_state state) { printk(KERN_ERR DEVICE_NAME ": PCI error\n"); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; if (state == pci_channel_io_frozen) return PCI_ERS_RESULT_NEED_RESET; return PCI_ERS_RESULT_CAN_RECOVER; } static pci_ers_result_t ngene_link_reset(struct pci_dev *dev) { printk(KERN_INFO DEVICE_NAME ": link reset\n"); return 0; } static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev) { printk(KERN_INFO DEVICE_NAME ": slot reset\n"); return 0; } static void ngene_resume(struct pci_dev *dev) { printk(KERN_INFO DEVICE_NAME ": resume\n"); } static struct pci_error_handlers ngene_errors = { .error_detected = ngene_error_detected, .link_reset = ngene_link_reset, .slot_reset = ngene_slot_reset, .resume = ngene_resume, }; static struct pci_driver ngene_pci_driver = { .name = "ngene", .id_table = ngene_id_tbl, .probe = ngene_probe, .remove = __devexit_p(ngene_remove), .err_handler = &ngene_errors, .shutdown = ngene_shutdown, }; static __init int module_init_ngene(void) { printk(KERN_INFO "nGene PCIE bridge driver, Copyright (C) 2005-2007 Micronas\n"); return pci_register_driver(&ngene_pci_driver); } static __exit void module_exit_ngene(void) { 
pci_unregister_driver(&ngene_pci_driver); } module_init(module_init_ngene); module_exit(module_exit_ngene); MODULE_DESCRIPTION("nGene"); MODULE_AUTHOR("Micronas, Ralph Metzler, Manfred Voelkel"); MODULE_LICENSE("GPL");
gpl-2.0
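cineS2_probe() in ngene-cards.c above decides per channel whether an STV0900 or a DRXK demodulator is present by attempting characteristic I2C reads, records the answer in chan->demod_type, and dispatches to the matching attach routine. A hypothetical, self-contained version of that detect-then-dispatch pattern follows; the detection functions are stubs, not the kernel's i2c_transfer() probes.

/* Detect-then-dispatch probing, sketched in plain C. The "bus reads"
 * are stubbed out; real code issues I2C transfers as in ngene-cards.c. */
#include <stdio.h>

static int port_has_demod_a(int port) { return port < 2; }  /* stub detection */
static int port_has_demod_b(int port) { return port >= 2; } /* stub detection */

static int attach_demod_a(int port) { printf("demod A on port %d\n", port); return 0; }
static int attach_demod_b(int port) { printf("demod B on port %d\n", port); return 0; }

/* Returns 0 on success, -1 when no known demodulator answers on the port. */
static int probe_port(int port, int *demod_type)
{
	if (port_has_demod_a(port)) {
		*demod_type = 0;
		return attach_demod_a(port);
	}
	if (port_has_demod_b(port)) {
		*demod_type = 1;
		return attach_demod_b(port);
	}
	fprintf(stderr, "no demod found on port %d\n", port);
	return -1;
}

int main(void)
{
	int type, port;

	for (port = 0; port < 4; port++)
		if (probe_port(port, &type) == 0)
			printf("port %d -> demod_type %d\n", port, type);
	return 0;
}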
v-superuser/android_kernel_sony_msm8x27
drivers/leds/leds-cobalt-qube.c
5112
2021
/* * Copyright 2006 - Florian Fainelli <florian@openwrt.org> * * Control the Cobalt Qube/RaQ front LED */ #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/types.h> #define LED_FRONT_LEFT 0x01 #define LED_FRONT_RIGHT 0x02 static void __iomem *led_port; static u8 led_value; static void qube_front_led_set(struct led_classdev *led_cdev, enum led_brightness brightness) { if (brightness) led_value = LED_FRONT_LEFT | LED_FRONT_RIGHT; else led_value = ~(LED_FRONT_LEFT | LED_FRONT_RIGHT); writeb(led_value, led_port); } static struct led_classdev qube_front_led = { .name = "qube::front", .brightness = LED_FULL, .brightness_set = qube_front_led_set, .default_trigger = "default-on", }; static int __devinit cobalt_qube_led_probe(struct platform_device *pdev) { struct resource *res; int retval; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EBUSY; led_port = ioremap(res->start, resource_size(res)); if (!led_port) return -ENOMEM; led_value = LED_FRONT_LEFT | LED_FRONT_RIGHT; writeb(led_value, led_port); retval = led_classdev_register(&pdev->dev, &qube_front_led); if (retval) goto err_iounmap; return 0; err_iounmap: iounmap(led_port); led_port = NULL; return retval; } static int __devexit cobalt_qube_led_remove(struct platform_device *pdev) { led_classdev_unregister(&qube_front_led); if (led_port) { iounmap(led_port); led_port = NULL; } return 0; } static struct platform_driver cobalt_qube_led_driver = { .probe = cobalt_qube_led_probe, .remove = __devexit_p(cobalt_qube_led_remove), .driver = { .name = "cobalt-qube-leds", .owner = THIS_MODULE, }, }; module_platform_driver(cobalt_qube_led_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Front LED support for Cobalt Server"); MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); MODULE_ALIAS("platform:cobalt-qube-leds");
gpl-2.0
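The Cobalt Qube driver above collapses any non-zero brightness into "both front LED bits set" and writes that byte to a single memory-mapped register. A small sketch of just that mapping, with the register write replaced by a captured byte since there is no real ioremap'd port here.

/* Brightness-to-register mapping used by the Cobalt Qube front LED,
 * modelled without the real ioremap()/writeb(); the "port" is a plain byte. */
#include <assert.h>
#include <stdint.h>

#define LED_FRONT_LEFT  0x01
#define LED_FRONT_RIGHT 0x02

static uint8_t fake_led_port;	/* stands in for the ioremap'd register */

static void front_led_set(int brightness)
{
	uint8_t val;

	if (brightness)
		val = LED_FRONT_LEFT | LED_FRONT_RIGHT;
	else
		val = (uint8_t)~(LED_FRONT_LEFT | LED_FRONT_RIGHT);

	fake_led_port = val;	/* real driver: writeb(val, led_port) */
}

int main(void)
{
	front_led_set(255);
	assert(fake_led_port == (LED_FRONT_LEFT | LED_FRONT_RIGHT));
	front_led_set(0);
	assert((fake_led_port & (LED_FRONT_LEFT | LED_FRONT_RIGHT)) == 0);
	return 0;
}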
techno/linux-stable-nvmswap
arch/arm/mach-mxs/devices/platform-auart.c
5624
1668
/* * Copyright (C) 2010 Pengutronix * Sascha Hauer <s.hauer@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <linux/dma-mapping.h> #include <asm/sizes.h> #include <mach/mx23.h> #include <mach/mx28.h> #include <mach/devices-common.h> #define mxs_auart_data_entry_single(soc, _id, hwid) \ { \ .id = _id, \ .iobase = soc ## _AUART ## hwid ## _BASE_ADDR, \ .irq = soc ## _INT_AUART ## hwid, \ } #define mxs_auart_data_entry(soc, _id, hwid) \ [_id] = mxs_auart_data_entry_single(soc, _id, hwid) #ifdef CONFIG_SOC_IMX23 const struct mxs_auart_data mx23_auart_data[] __initconst = { #define mx23_auart_data_entry(_id, hwid) \ mxs_auart_data_entry(MX23, _id, hwid) mx23_auart_data_entry(0, 1), mx23_auart_data_entry(1, 2), }; #endif #ifdef CONFIG_SOC_IMX28 const struct mxs_auart_data mx28_auart_data[] __initconst = { #define mx28_auart_data_entry(_id) \ mxs_auart_data_entry(MX28, _id, _id) mx28_auart_data_entry(0), mx28_auart_data_entry(1), mx28_auart_data_entry(2), mx28_auart_data_entry(3), mx28_auart_data_entry(4), }; #endif struct platform_device *__init mxs_add_auart( const struct mxs_auart_data *data) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + SZ_8K - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return mxs_add_platform_device_dmamask("mxs-auart", data->id, res, ARRAY_SIZE(res), NULL, 0, DMA_BIT_MASK(32)); }
gpl-2.0
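platform-auart.c above builds its per-SoC AUART tables with token-pasting macros so the base-address and IRQ constants only have to be spelled once per entry. A hedged userspace illustration of that table-building idiom; the MX28_* constants below are made up for the example and are not taken from the real headers.

/* Token-pasting table construction in the style of mxs_auart_data_entry().
 * The MX28_* constants below are invented for the example. */
#include <stdio.h>

#define MX28_AUART0_BASE_ADDR 0x8006a000u
#define MX28_AUART1_BASE_ADDR 0x8006c000u
#define MX28_INT_AUART0 112
#define MX28_INT_AUART1 113

struct auart_data {
	int id;
	unsigned long iobase;
	int irq;
};

/* Expands soc/hwid into the matching _BASE_ADDR and _INT_ constants. */
#define auart_entry(soc, _id, hwid)				\
	[_id] = {						\
		.id = _id,					\
		.iobase = soc ## _AUART ## hwid ## _BASE_ADDR,	\
		.irq = soc ## _INT_AUART ## hwid,		\
	}

static const struct auart_data mx28_auart[] = {
	auart_entry(MX28, 0, 0),
	auart_entry(MX28, 1, 1),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(mx28_auart) / sizeof(mx28_auart[0]); i++)
		printf("auart%d: base 0x%lx irq %d\n",
		       mx28_auart[i].id, mx28_auart[i].iobase, mx28_auart[i].irq);
	return 0;
}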
rr-zenfone2/android_kernel_asus_moorefield
drivers/of/of_pci.c
7416
1067
#include <linux/kernel.h> #include <linux/export.h> #include <linux/of.h> #include <linux/of_pci.h> #include <asm/prom.h> static inline int __of_pci_pci_compare(struct device_node *node, unsigned int devfn) { unsigned int size; const __be32 *reg = of_get_property(node, "reg", &size); if (!reg || size < 5 * sizeof(__be32)) return 0; return ((be32_to_cpup(&reg[0]) >> 8) & 0xff) == devfn; } struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn) { struct device_node *node, *node2; for_each_child_of_node(parent, node) { if (__of_pci_pci_compare(node, devfn)) return node; /* * Some OFs create a parent node "multifunc-device" as * a fake root for all functions of a multi-function * device we go down them as well. */ if (!strcmp(node->name, "multifunc-device")) { for_each_child_of_node(node, node2) { if (__of_pci_pci_compare(node2, devfn)) { of_node_put(node); return node2; } } } } return NULL; } EXPORT_SYMBOL_GPL(of_pci_find_child_device);
gpl-2.0
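of_pci_find_child_device() above matches a child node by pulling the device/function number out of the first cell of its "reg" property: the devfn sits in bits 15:8 of that cell (device in bits 15:11, function in 10:8). A standalone sketch of the extraction and comparison; the sample cell value is made up.

/* Extracting devfn from the first cell of a PCI "reg" property.
 * In the PCI OF binding, that cell carries bus in bits 23:16,
 * device in bits 15:11 and function in bits 10:8. */
#include <stdint.h>
#include <stdio.h>

static unsigned int reg_cell_to_devfn(uint32_t phys_hi)
{
	return (phys_hi >> 8) & 0xff;	/* device (5 bits) | function (3 bits) */
}

int main(void)
{
	/* made-up cell: bus 0, device 3, function 1 */
	uint32_t cell = (3u << 11) | (1u << 8);
	unsigned int devfn = reg_cell_to_devfn(cell);

	printf("devfn %#x (device %u, function %u)\n",
	       devfn, devfn >> 3, devfn & 7);
	return devfn == ((3u << 3) | 1) ? 0 : 1;
}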
neobuddy89/yu_msm8916
arch/mn10300/mm/cache-smp.c
11768
2969
/* SMP global caching code * * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/threads.h> #include <linux/interrupt.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/smp.h> #include "cache-smp.h" DEFINE_SPINLOCK(smp_cache_lock); static unsigned long smp_cache_mask; static unsigned long smp_cache_start; static unsigned long smp_cache_end; static cpumask_t smp_cache_ipi_map; /* Bitmask of cache IPI done CPUs */ /** * smp_cache_interrupt - Handle IPI request to flush caches. * * Handle a request delivered by IPI to flush the current CPU's * caches. The parameters are stored in smp_cache_*. */ void smp_cache_interrupt(void) { unsigned long opr_mask = smp_cache_mask; switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) { case SMP_DCACHE_NOP: break; case SMP_DCACHE_INV: mn10300_local_dcache_inv(); break; case SMP_DCACHE_INV_RANGE: mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end); break; case SMP_DCACHE_FLUSH: mn10300_local_dcache_flush(); break; case SMP_DCACHE_FLUSH_RANGE: mn10300_local_dcache_flush_range(smp_cache_start, smp_cache_end); break; case SMP_DCACHE_FLUSH_INV: mn10300_local_dcache_flush_inv(); break; case SMP_DCACHE_FLUSH_INV_RANGE: mn10300_local_dcache_flush_inv_range(smp_cache_start, smp_cache_end); break; } switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) { case SMP_ICACHE_NOP: break; case SMP_ICACHE_INV: mn10300_local_icache_inv(); break; case SMP_ICACHE_INV_RANGE: mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end); break; } cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map); } /** * smp_cache_call - Issue an IPI to request the other CPUs flush caches * @opr_mask: Cache operation flags * @start: Start address of request * @end: End address of request * * Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt() * above on those other CPUs and then waits for them to finish. * * The caller must hold smp_cache_lock. */ void smp_cache_call(unsigned long opr_mask, unsigned long start, unsigned long end) { smp_cache_mask = opr_mask; smp_cache_start = start; smp_cache_end = end; cpumask_copy(&smp_cache_ipi_map, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map); send_IPI_allbutself(FLUSH_CACHE_IPI); while (!cpumask_empty(&smp_cache_ipi_map)) /* nothing. lockup detection does not belong here */ mb(); }
gpl-2.0
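smp_cache_call() above stores the requested operation in globals, copies the online-CPU mask minus itself into smp_cache_ipi_map, fires the IPI, and then spins until every remote CPU has cleared its own bit from smp_cache_ipi_map. The sketch below models just that bitmask handshake with an ordinary unsigned long; in the kernel the remote side runs smp_cache_interrupt() from interrupt context rather than being called inline.

/* Bitmask handshake used for the SMP cache-flush IPI, modelled serially:
 * the "remote CPUs" ack inline instead of from an interrupt handler. */
#include <stdio.h>

#define NR_CPUS 4

static unsigned long ipi_map;	/* one bit per CPU still working */

static void remote_cache_interrupt(int cpu)
{
	/* remote CPU flushes its local cache, then signals completion */
	ipi_map &= ~(1UL << cpu);
}

static void cache_call(int self, unsigned long online_mask)
{
	int cpu;

	ipi_map = online_mask & ~(1UL << self);	/* everyone but ourselves */

	/* stand-in for send_IPI_allbutself(); here we just "deliver" it */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (ipi_map & (1UL << cpu))
			remote_cache_interrupt(cpu);

	while (ipi_map)		/* the kernel spins here with mb() */
		;
	printf("all CPUs flushed\n");
}

int main(void)
{
	cache_call(0, 0xfUL);	/* CPUs 0-3 online, caller is CPU 0 */
	return 0;
}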
Seinlin/novo7-kernel
drivers/scsi/fnic/vnic_intr.c
15096
1844
/* * Copyright 2008 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> #include "vnic_dev.h" #include "vnic_intr.h" void vnic_intr_free(struct vnic_intr *intr) { intr->ctrl = NULL; } int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, unsigned int index) { intr->index = index; intr->vdev = vdev; intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); if (!intr->ctrl) { printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n", index); return -EINVAL; } return 0; } void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer, unsigned int coalescing_type, unsigned int mask_on_assertion) { iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer); iowrite32(coalescing_type, &intr->ctrl->coalescing_type); iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion); iowrite32(0, &intr->ctrl->int_credits); } void vnic_intr_clean(struct vnic_intr *intr) { iowrite32(0, &intr->ctrl->int_credits); }
gpl-2.0
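vnic_intr_alloc()/vnic_intr_init() above look up a per-index interrupt control block and program its coalescing registers with iowrite32(), then zero the credit counter. A small stand-in, assuming a plain struct in ordinary memory instead of a BAR-mapped resource.

/* Stand-in for the vnic interrupt-control block; the real registers are
 * reached through ioremap()/iowrite32() rather than plain stores. */
#include <stdio.h>
#include <stdint.h>

struct intr_ctrl {
	uint32_t coalescing_timer;
	uint32_t coalescing_type;
	uint32_t mask_on_assertion;
	uint32_t int_credits;
};

static void intr_init(struct intr_ctrl *ctrl, uint32_t timer,
		      uint32_t type, uint32_t mask_on_assertion)
{
	ctrl->coalescing_timer = timer;
	ctrl->coalescing_type = type;
	ctrl->mask_on_assertion = mask_on_assertion;
	ctrl->int_credits = 0;	/* start with no outstanding credits */
}

int main(void)
{
	struct intr_ctrl ctrl;

	intr_init(&ctrl, 80, 0, 1);
	printf("timer=%u type=%u mask=%u credits=%u\n",
	       ctrl.coalescing_timer, ctrl.coalescing_type,
	       ctrl.mask_on_assertion, ctrl.int_credits);
	return 0;
}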
buenajuan300/android_kernel_samsung_grandprimevelte
drivers/staging/rtl8188eu/core/rtw_mlme.c
249
72694
/****************************************************************************** * * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * ******************************************************************************/ #define _RTW_MLME_C_ #include <osdep_service.h> #include <drv_types.h> #include <recv_osdep.h> #include <xmit_osdep.h> #include <hal_intf.h> #include <mlme_osdep.h> #include <sta_info.h> #include <wifi.h> #include <wlan_bssdef.h> #include <rtw_ioctl_set.h> #include <usb_osintf.h> extern unsigned char MCS_rate_2R[16]; extern unsigned char MCS_rate_1R[16]; int _rtw_init_mlme_priv (struct adapter *padapter) { int i; u8 *pbuf; struct wlan_network *pnetwork; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; int res = _SUCCESS; _func_enter_; /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */ pmlmepriv->nic_hdl = (u8 *)padapter; pmlmepriv->pscanned = NULL; pmlmepriv->fw_state = 0; pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown; pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */ spin_lock_init(&(pmlmepriv->lock)); _rtw_init_queue(&(pmlmepriv->free_bss_pool)); _rtw_init_queue(&(pmlmepriv->scanned_queue)); set_scanned_network_val(pmlmepriv, 0); _rtw_memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid)); pbuf = rtw_zvmalloc(MAX_BSS_CNT * (sizeof(struct wlan_network))); if (pbuf == NULL) { res = _FAIL; goto exit; } pmlmepriv->free_bss_buf = pbuf; pnetwork = (struct wlan_network *)pbuf; for (i = 0; i < MAX_BSS_CNT; i++) { _rtw_init_listhead(&(pnetwork->list)); rtw_list_insert_tail(&(pnetwork->list), &(pmlmepriv->free_bss_pool.queue)); pnetwork++; } /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */ rtw_clear_scan_deny(padapter); rtw_init_mlme_timer(padapter); exit: _func_exit_; return res; } #if defined (CONFIG_88EU_AP_MODE) static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen) { kfree(*ppie); *plen = 0; *ppie = NULL; } void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv) { rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len); rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len); rtw_free_mlme_ie_data(&pmlmepriv->wps_beacon_ie, &pmlmepriv->wps_beacon_ie_len); rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len); rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len); rtw_free_mlme_ie_data(&pmlmepriv->wps_assoc_resp_ie, &pmlmepriv->wps_assoc_resp_ie_len); rtw_free_mlme_ie_data(&pmlmepriv->p2p_beacon_ie, &pmlmepriv->p2p_beacon_ie_len); rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_req_ie, &pmlmepriv->p2p_probe_req_ie_len); rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_resp_ie, &pmlmepriv->p2p_probe_resp_ie_len); rtw_free_mlme_ie_data(&pmlmepriv->p2p_go_probe_resp_ie, 
&pmlmepriv->p2p_go_probe_resp_ie_len); rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len); } #else void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv) { } #endif void _rtw_free_mlme_priv (struct mlme_priv *pmlmepriv) { _func_enter_; rtw_free_mlme_priv_ie_data(pmlmepriv); if (pmlmepriv) { if (pmlmepriv->free_bss_buf) { rtw_vmfree(pmlmepriv->free_bss_buf, MAX_BSS_CNT * sizeof(struct wlan_network)); } } _func_exit_; } int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork) { _func_enter_; if (pnetwork == NULL) goto exit; spin_lock_bh(&queue->lock); rtw_list_insert_tail(&pnetwork->list, &queue->queue); spin_unlock_bh(&queue->lock); exit: _func_exit_; return _SUCCESS; } struct wlan_network *_rtw_dequeue_network(struct __queue *queue) { struct wlan_network *pnetwork; _func_enter_; spin_lock_bh(&queue->lock); if (_rtw_queue_empty(queue)) { pnetwork = NULL; } else { pnetwork = LIST_CONTAINOR(get_next(&queue->queue), struct wlan_network, list); rtw_list_delete(&(pnetwork->list)); } spin_unlock_bh(&queue->lock); _func_exit_; return pnetwork; } struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */ { struct wlan_network *pnetwork; struct __queue *free_queue = &pmlmepriv->free_bss_pool; struct list_head *plist = NULL; _func_enter_; spin_lock_bh(&free_queue->lock); if (_rtw_queue_empty(free_queue) == true) { pnetwork = NULL; goto exit; } plist = get_next(&(free_queue->queue)); pnetwork = LIST_CONTAINOR(plist , struct wlan_network, list); rtw_list_delete(&pnetwork->list); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr=%p\n", plist)); pnetwork->network_type = 0; pnetwork->fixed = false; pnetwork->last_scanned = jiffies; pnetwork->aid = 0; pnetwork->join_res = 0; pmlmepriv->num_of_scanned++; exit: spin_unlock_bh(&free_queue->lock); _func_exit_; return pnetwork; } void _rtw_free_network(struct mlme_priv *pmlmepriv , struct wlan_network *pnetwork, u8 isfreeall) { u32 curr_time, delta_time; u32 lifetime = SCANQUEUE_LIFETIME; struct __queue *free_queue = &(pmlmepriv->free_bss_pool); _func_enter_; if (pnetwork == NULL) goto exit; if (pnetwork->fixed) goto exit; curr_time = jiffies; if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) || (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))) lifetime = 1; if (!isfreeall) { delta_time = (curr_time - pnetwork->last_scanned)/HZ; if (delta_time < lifetime)/* unit:sec */ goto exit; } spin_lock_bh(&free_queue->lock); rtw_list_delete(&(pnetwork->list)); rtw_list_insert_tail(&(pnetwork->list), &(free_queue->queue)); pmlmepriv->num_of_scanned--; spin_unlock_bh(&free_queue->lock); exit: _func_exit_; } void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork) { struct __queue *free_queue = &(pmlmepriv->free_bss_pool); _func_enter_; if (pnetwork == NULL) goto exit; if (pnetwork->fixed) goto exit; rtw_list_delete(&(pnetwork->list)); rtw_list_insert_tail(&(pnetwork->list), get_list_head(free_queue)); pmlmepriv->num_of_scanned--; exit: _func_exit_; } /* return the wlan_network with the matching addr Shall be calle under atomic context... to avoid possible racing condition... 
*/ struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr) { struct list_head *phead, *plist; struct wlan_network *pnetwork = NULL; u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; _func_enter_; if (_rtw_memcmp(zero_addr, addr, ETH_ALEN)) { pnetwork = NULL; goto exit; } phead = get_list_head(scanned_queue); plist = get_next(phead); while (plist != phead) { pnetwork = LIST_CONTAINOR(plist, struct wlan_network , list); if (_rtw_memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN) == true) break; plist = get_next(plist); } if (plist == phead) pnetwork = NULL; exit: _func_exit_; return pnetwork; } void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall) { struct list_head *phead, *plist; struct wlan_network *pnetwork; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct __queue *scanned_queue = &pmlmepriv->scanned_queue; _func_enter_; spin_lock_bh(&scanned_queue->lock); phead = get_list_head(scanned_queue); plist = get_next(phead); while (rtw_end_of_queue_search(phead, plist) == false) { pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); plist = get_next(plist); _rtw_free_network(pmlmepriv, pnetwork, isfreeall); } spin_unlock_bh(&scanned_queue->lock); _func_exit_; } int rtw_if_up(struct adapter *padapter) { int res; _func_enter_; if (padapter->bDriverStopped || padapter->bSurpriseRemoved || (check_fwstate(&padapter->mlmepriv, _FW_LINKED) == false)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_if_up:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved)); res = false; } else { res = true; } _func_exit_; return res; } void rtw_generate_random_ibss(u8 *pibss) { u32 curtime = jiffies; _func_enter_; pibss[0] = 0x02; /* in ad-hoc mode bit1 must set to 1 */ pibss[1] = 0x11; pibss[2] = 0x87; pibss[3] = (u8)(curtime & 0xff);/* p[0]; */ pibss[4] = (u8)((curtime>>8) & 0xff);/* p[1]; */ pibss[5] = (u8)((curtime>>16) & 0xff);/* p[2]; */ _func_exit_; return; } u8 *rtw_get_capability_from_ie(u8 *ie) { return ie + 8 + 2; } u16 rtw_get_capability(struct wlan_bssid_ex *bss) { __le16 val; _func_enter_; memcpy((u8 *)&val, rtw_get_capability_from_ie(bss->IEs), 2); _func_exit_; return le16_to_cpu(val); } u8 *rtw_get_timestampe_from_ie(u8 *ie) { return ie + 0; } u8 *rtw_get_beacon_interval_from_ie(u8 *ie) { return ie + 8; } int rtw_init_mlme_priv (struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */ { int res; _func_enter_; res = _rtw_init_mlme_priv(padapter);/* (pmlmepriv); */ _func_exit_; return res; } void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv) { _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_mlme_priv\n")); _rtw_free_mlme_priv (pmlmepriv); _func_exit_; } static struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv) { struct wlan_network *pnetwork; _func_enter_; pnetwork = _rtw_alloc_network(pmlmepriv); _func_exit_; return pnetwork; } static void rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork) { _func_enter_; _rtw_free_network_nolock(pmlmepriv, pnetwork); _func_exit_; } void rtw_free_network_queue(struct adapter *dev, u8 isfreeall) { _func_enter_; _rtw_free_network_queue(dev, isfreeall); _func_exit_; } /* return the wlan_network with the matching addr Shall be calle under atomic context... to avoid possible racing condition... 
*/ struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr) { struct wlan_network *pnetwork = _rtw_find_network(scanned_queue, addr); return pnetwork; } int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork) { int ret = true; struct security_priv *psecuritypriv = &adapter->securitypriv; if ((psecuritypriv->dot11PrivacyAlgrthm != _NO_PRIVACY_) && (pnetwork->network.Privacy == 0)) ret = false; else if ((psecuritypriv->dot11PrivacyAlgrthm == _NO_PRIVACY_) && (pnetwork->network.Privacy == 1)) ret = false; else ret = true; return ret; } static int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b) { return (a->Ssid.SsidLength == b->Ssid.SsidLength) && _rtw_memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength); } int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst) { u16 s_cap, d_cap; __le16 le_scap, le_dcap; _func_enter_; memcpy((u8 *)&le_scap, rtw_get_capability_from_ie(src->IEs), 2); memcpy((u8 *)&le_dcap, rtw_get_capability_from_ie(dst->IEs), 2); s_cap = le16_to_cpu(le_scap); d_cap = le16_to_cpu(le_dcap); _func_exit_; return ((src->Ssid.SsidLength == dst->Ssid.SsidLength) && ((_rtw_memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN)) == true) && ((_rtw_memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) == true) && ((s_cap & WLAN_CAPABILITY_IBSS) == (d_cap & WLAN_CAPABILITY_IBSS)) && ((s_cap & WLAN_CAPABILITY_BSS) == (d_cap & WLAN_CAPABILITY_BSS))); } struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue) { struct list_head *plist, *phead; struct wlan_network *pwlan = NULL; struct wlan_network *oldest = NULL; _func_enter_; phead = get_list_head(scanned_queue); plist = get_next(phead); while (1) { if (rtw_end_of_queue_search(phead, plist) == true) break; pwlan = LIST_CONTAINOR(plist, struct wlan_network, list); if (!pwlan->fixed) { if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned)) oldest = pwlan; } plist = get_next(plist); } _func_exit_; return oldest; } void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src, struct adapter *padapter, bool update_ie) { long rssi_ori = dst->Rssi; u8 sq_smp = src->PhyInfo.SignalQuality; u8 ss_final; u8 sq_final; long rssi_final; _func_enter_; rtw_hal_antdiv_rssi_compared(padapter, dst, src); /* this will update src.Rssi, need consider again */ /* The rule below is 1/5 for sample value, 4/5 for history value */ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) && is_same_network(&(padapter->mlmepriv.cur_network.network), src)) { /* Take the recvpriv's value for the connected AP*/ ss_final = padapter->recvpriv.signal_strength; sq_final = padapter->recvpriv.signal_qual; /* the rssi value here is undecorated, and will be used for antenna diversity */ if (sq_smp != 101) /* from the right channel */ rssi_final = (src->Rssi+dst->Rssi*4)/5; else rssi_final = rssi_ori; } else { if (sq_smp != 101) { /* from the right channel */ ss_final = ((u32)(src->PhyInfo.SignalStrength)+(u32)(dst->PhyInfo.SignalStrength)*4)/5; sq_final = ((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5; rssi_final = (src->Rssi+dst->Rssi*4)/5; } else { /* bss info not receiving from the right channel, use the original RX signal infos */ ss_final = dst->PhyInfo.SignalStrength; sq_final = dst->PhyInfo.SignalQuality; rssi_final = dst->Rssi; } } if (update_ie) memcpy((u8 *)dst, (u8 *)src, get_wlan_bssid_ex_sz(src)); dst->PhyInfo.SignalStrength = ss_final; dst->PhyInfo.SignalQuality = sq_final; dst->Rssi = rssi_final; 
_func_exit_; } static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork) { struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); _func_enter_; if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) && (is_same_network(&(pmlmepriv->cur_network.network), pnetwork))) { update_network(&(pmlmepriv->cur_network.network), pnetwork, adapter, true); rtw_update_protection(adapter, (pmlmepriv->cur_network.network.IEs) + sizeof(struct ndis_802_11_fixed_ie), pmlmepriv->cur_network.network.IELength); } _func_exit_; } /* Caller must hold pmlmepriv->lock first. */ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target) { struct list_head *plist, *phead; u32 bssid_ex_sz; struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); struct __queue *queue = &(pmlmepriv->scanned_queue); struct wlan_network *pnetwork = NULL; struct wlan_network *oldest = NULL; _func_enter_; spin_lock_bh(&queue->lock); phead = get_list_head(queue); plist = get_next(phead); while (1) { if (rtw_end_of_queue_search(phead, plist) == true) break; pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); if (is_same_network(&(pnetwork->network), target)) break; if ((oldest == ((struct wlan_network *)0)) || time_after(oldest->last_scanned, pnetwork->last_scanned)) oldest = pnetwork; plist = get_next(plist); } /* If we didn't find a match, then get a new network slot to initialize * with this beacon's information */ if (rtw_end_of_queue_search(phead, plist) == true) { if (_rtw_queue_empty(&(pmlmepriv->free_bss_pool)) == true) { /* If there are no more slots, expire the oldest */ pnetwork = oldest; rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(target->PhyInfo.Optimum_antenna)); memcpy(&(pnetwork->network), target, get_wlan_bssid_ex_sz(target)); /* variable initialize */ pnetwork->fixed = false; pnetwork->last_scanned = jiffies; pnetwork->network_type = 0; pnetwork->aid = 0; pnetwork->join_res = 0; /* bss info not receiving from the right channel */ if (pnetwork->network.PhyInfo.SignalQuality == 101) pnetwork->network.PhyInfo.SignalQuality = 0; } else { /* Otherwise just pull from the free list */ pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */ if (pnetwork == NULL) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n")); goto exit; } bssid_ex_sz = get_wlan_bssid_ex_sz(target); target->Length = bssid_ex_sz; rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(target->PhyInfo.Optimum_antenna)); memcpy(&(pnetwork->network), target, bssid_ex_sz); pnetwork->last_scanned = jiffies; /* bss info not receiving from the right channel */ if (pnetwork->network.PhyInfo.SignalQuality == 101) pnetwork->network.PhyInfo.SignalQuality = 0; rtw_list_insert_tail(&(pnetwork->list), &(queue->queue)); } } else { /* we have an entry and we are going to update it. But this entry may * be already expired. In this case we do the same as we found a new * net and call the new_net handler */ bool update_ie = true; pnetwork->last_scanned = jiffies; /* target.Reserved[0]== 1, means that scanned network is a bcn frame. 
*/ if ((pnetwork->network.IELength > target->IELength) && (target->Reserved[0] == 1)) update_ie = false; update_network(&(pnetwork->network), target, adapter, update_ie); } exit: spin_unlock_bh(&queue->lock); _func_exit_; } static void rtw_add_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork) { _func_enter_; #if defined(CONFIG_88EU_P2P) rtw_wlan_bssid_ex_remove_p2p_attr(pnetwork, P2P_ATTR_GROUP_INFO); #endif update_current_network(adapter, pnetwork); rtw_update_scanned_network(adapter, pnetwork); _func_exit_; } /* select the desired network based on the capability of the (i)bss. */ /* check items: (1) security */ /* (2) network_type */ /* (3) WMM */ /* (4) HT */ /* (5) others */ static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork) { struct security_priv *psecuritypriv = &adapter->securitypriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; u32 desired_encmode; u32 privacy; /* u8 wps_ie[512]; */ uint wps_ielen; int bselected = true; desired_encmode = psecuritypriv->ndisencryptstatus; privacy = pnetwork->network.Privacy; if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) { if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen) != NULL) return true; else return false; } if (adapter->registrypriv.wifi_spec == 1) { /* for correct flow of 8021X to do.... */ if ((desired_encmode == Ndis802_11EncryptionDisabled) && (privacy != 0)) bselected = false; } if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) { DBG_88E("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy); bselected = false; } if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) { if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode) bselected = false; } return bselected; } /* TODO: Perry: For Power Management */ void rtw_atimdone_event_callback(struct adapter *adapter , u8 *pbuf) { _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n")); _func_exit_; return; } void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf) { u32 len; struct wlan_bssid_ex *pnetwork; struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); _func_enter_; pnetwork = (struct wlan_bssid_ex *)pbuf; RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_survey_event_callback, ssid=%s\n", pnetwork->Ssid.Ssid)); len = get_wlan_bssid_ex_sz(pnetwork); if (len > (sizeof(struct wlan_bssid_ex))) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n****rtw_survey_event_callback: return a wrong bss ***\n")); return; } spin_lock_bh(&pmlmepriv->lock); /* update IBSS_network 's timestamp */ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) { if (_rtw_memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) { struct wlan_network *ibss_wlan = NULL; memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8); spin_lock_bh(&(pmlmepriv->scanned_queue.lock)); ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->MacAddress); if (ibss_wlan) { memcpy(ibss_wlan->network.IEs , pnetwork->IEs, 8); spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto exit; } spin_unlock_bh(&pmlmepriv->scanned_queue.lock); } } /* lock pmlmepriv->lock when you accessing network_q */ if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == false) { if (pnetwork->Ssid.Ssid[0] == 0) pnetwork->Ssid.SsidLength = 0; rtw_add_network(adapter, pnetwork); } exit: spin_unlock_bh(&pmlmepriv->lock); _func_exit_; return; } void 
rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf) { struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); struct mlme_ext_priv *pmlmeext; _func_enter_; spin_lock_bh(&pmlmepriv->lock); if (pmlmepriv->wps_probe_req_ie) { pmlmepriv->wps_probe_req_ie_len = 0; kfree(pmlmepriv->wps_probe_req_ie); pmlmepriv->wps_probe_req_ie = NULL; } RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_surveydone_event_callback: fw_state:%x\n\n", get_fwstate(pmlmepriv))); if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) { u8 timer_cancelled; _cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled); _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); } else { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("nic status=%x, survey done event comes too late!\n", get_fwstate(pmlmepriv))); } rtw_set_signal_stat_timer(&adapter->recvpriv); if (pmlmepriv->to_join) { if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) { if (check_fwstate(pmlmepriv, _FW_LINKED) == false) { set_fwstate(pmlmepriv, _FW_UNDER_LINKING); if (rtw_select_and_join_from_scanned_queue(pmlmepriv) == _SUCCESS) { _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT); } else { struct wlan_bssid_ex *pdev_network = &(adapter->registrypriv.dev_network); u8 *pibss = adapter->registrypriv.dev_network.MacAddress; _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("switching to adhoc master\n")); _rtw_memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid)); rtw_update_registrypriv_dev_network(adapter); rtw_generate_random_ibss(pibss); pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE; if (rtw_createbss_cmd(adapter) != _SUCCESS) RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error=>rtw_createbss_cmd status FAIL\n")); pmlmepriv->to_join = false; } } } else { int s_ret; set_fwstate(pmlmepriv, _FW_UNDER_LINKING); pmlmepriv->to_join = false; s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv); if (_SUCCESS == s_ret) { _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT); } else if (s_ret == 2) { /* there is no need to wait for join */ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); rtw_indicate_connect(adapter); } else { DBG_88E("try_to_join, but select scanning queue fail, to_roaming:%d\n", pmlmepriv->to_roaming); if (pmlmepriv->to_roaming != 0) { if (--pmlmepriv->to_roaming == 0 || _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)) { pmlmepriv->to_roaming = 0; rtw_free_assoc_resources(adapter, 1); rtw_indicate_disconnect(adapter); } else { pmlmepriv->to_join = true; } } _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); } } } indicate_wx_scan_complete_event(adapter); spin_unlock_bh(&pmlmepriv->lock); if (check_fwstate(pmlmepriv, _FW_LINKED) == true) p2p_ps_wk_cmd(adapter, P2P_PS_SCAN_DONE, 0); rtw_os_xmit_schedule(adapter); pmlmeext = &adapter->mlmeextpriv; if (pmlmeext->sitesurvey_res.bss_cnt == 0) rtw_hal_sreset_reset(adapter); _func_exit_; } void rtw_dummy_event_callback(struct adapter *adapter , u8 *pbuf) { } void rtw_fwdbg_event_callback(struct adapter *adapter , u8 *pbuf) { } static void free_scanqueue(struct mlme_priv *pmlmepriv) { struct __queue *free_queue = &pmlmepriv->free_bss_pool; struct __queue *scan_queue = &pmlmepriv->scanned_queue; struct list_head *plist, *phead, *ptemp; _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+free_scanqueue\n")); spin_lock_bh(&scan_queue->lock); spin_lock_bh(&free_queue->lock); phead = get_list_head(scan_queue); plist = get_next(phead); while 
(plist != phead) { ptemp = get_next(plist); rtw_list_delete(plist); rtw_list_insert_tail(plist, &free_queue->queue); plist = ptemp; pmlmepriv->num_of_scanned--; } spin_unlock_bh(&free_queue->lock); spin_unlock_bh(&scan_queue->lock); _func_exit_; } /* *rtw_free_assoc_resources: the caller has to lock pmlmepriv->lock */ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue) { struct wlan_network *pwlan = NULL; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct sta_priv *pstapriv = &adapter->stapriv; struct wlan_network *tgt_network = &pmlmepriv->cur_network; _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_free_assoc_resources\n")); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("tgt_network->network.MacAddress=%pM ssid=%s\n", tgt_network->network.MacAddress, tgt_network->network.Ssid.Ssid)); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_AP_STATE)) { struct sta_info *psta; psta = rtw_get_stainfo(&adapter->stapriv, tgt_network->network.MacAddress); spin_lock_bh(&(pstapriv->sta_hash_lock)); rtw_free_stainfo(adapter, psta); spin_unlock_bh(&pstapriv->sta_hash_lock); } if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE)) { struct sta_info *psta; rtw_free_all_stainfo(adapter); psta = rtw_get_bcmc_stainfo(adapter); spin_lock_bh(&(pstapriv->sta_hash_lock)); rtw_free_stainfo(adapter, psta); spin_unlock_bh(&pstapriv->sta_hash_lock); rtw_init_bcmc_stainfo(adapter); } if (lock_scanned_queue) spin_lock_bh(&(pmlmepriv->scanned_queue.lock)); pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress); if (pwlan) pwlan->fixed = false; else RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_assoc_resources:pwlan==NULL\n\n")); if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) && (adapter->stapriv.asoc_sta_count == 1))) rtw_free_network_nolock(pmlmepriv, pwlan); if (lock_scanned_queue) spin_unlock_bh(&pmlmepriv->scanned_queue.lock); pmlmepriv->key_mask = 0; _func_exit_; } /* *rtw_indicate_connect: the caller has to lock pmlmepriv->lock */ void rtw_indicate_connect(struct adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_connect\n")); pmlmepriv->to_join = false; if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) { set_fwstate(pmlmepriv, _FW_LINKED); rtw_led_control(padapter, LED_CTL_LINK); rtw_os_indicate_connect(padapter); } pmlmepriv->to_roaming = 0; rtw_set_scan_deny(padapter, 3000); RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("-rtw_indicate_connect: fw_state=0x%08x\n", get_fwstate(pmlmepriv))); _func_exit_; } /* *rtw_indicate_disconnect: the caller has to lock pmlmepriv->lock */ void rtw_indicate_disconnect(struct adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_disconnect\n")); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING | WIFI_UNDER_WPS); if (pmlmepriv->to_roaming > 0) _clr_fwstate_(pmlmepriv, _FW_LINKED); if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) || (pmlmepriv->to_roaming <= 0)) { rtw_os_indicate_disconnect(padapter); _clr_fwstate_(pmlmepriv, _FW_LINKED); rtw_led_control(padapter, LED_CTL_NO_LINK); rtw_clear_scan_deny(padapter); } p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1); rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 1); _func_exit_; } inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted) { rtw_os_indicate_scan_done(padapter, aborted); } void 
rtw_scan_abort(struct adapter *adapter) { u32 start; struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); struct mlme_ext_priv *pmlmeext = &(adapter->mlmeextpriv); start = jiffies; pmlmeext->scan_abort = true; while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) && rtw_get_passing_time_ms(start) <= 200) { if (adapter->bDriverStopped || adapter->bSurpriseRemoved) break; DBG_88E(FUNC_NDEV_FMT"fw_state=_FW_UNDER_SURVEY!\n", FUNC_NDEV_ARG(adapter->pnetdev)); msleep(20); } if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) { if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved) DBG_88E(FUNC_NDEV_FMT"waiting for scan_abort time out!\n", FUNC_NDEV_ARG(adapter->pnetdev)); rtw_indicate_scan_done(adapter, true); } pmlmeext->scan_abort = false; } static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, struct wlan_network *pnetwork) { int i; struct sta_info *bmc_sta, *psta = NULL; struct recv_reorder_ctrl *preorder_ctrl; struct sta_priv *pstapriv = &padapter->stapriv; psta = rtw_get_stainfo(pstapriv, pnetwork->network.MacAddress); if (psta == NULL) psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress); if (psta) { /* update ptarget_sta */ DBG_88E("%s\n", __func__); psta->aid = pnetwork->join_res; psta->mac_id = 0; /* sta mode */ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true); /* security related */ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { padapter->securitypriv.binstallGrpkey = false; padapter->securitypriv.busetkipkey = false; padapter->securitypriv.bgrpkey_handshake = false; psta->ieee8021x_blocked = true; psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm; _rtw_memset((u8 *)&psta->dot118021x_UncstKey, 0, sizeof(union Keytype)); _rtw_memset((u8 *)&psta->dot11tkiprxmickey, 0, sizeof(union Keytype)); _rtw_memset((u8 *)&psta->dot11tkiptxmickey, 0, sizeof(union Keytype)); _rtw_memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48)); _rtw_memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48)); } /* Commented by Albert 2012/07/21 */ /* When doing the WPS, the wps_ie_len won't equal to 0 */ /* And the Wi-Fi driver shouldn't allow the data packet to be tramsmitted. */ if (padapter->securitypriv.wps_ie_len != 0) { psta->ieee8021x_blocked = true; padapter->securitypriv.wps_ie_len = 0; } /* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */ /* if A-MPDU Rx is enabled, resetting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */ /* todo: check if AP can send A-MPDU packets */ for (i = 0; i < 16; i++) { /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */ preorder_ctrl = &psta->recvreorder_ctrl[i]; preorder_ctrl->enable = false; preorder_ctrl->indicate_seq = 0xffff; preorder_ctrl->wend_b = 0xffff; preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */ } bmc_sta = rtw_get_bcmc_stainfo(padapter); if (bmc_sta) { for (i = 0; i < 16; i++) { /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */ preorder_ctrl = &bmc_sta->recvreorder_ctrl[i]; preorder_ctrl->enable = false; preorder_ctrl->indicate_seq = 0xffff; preorder_ctrl->wend_b = 0xffff; preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */ } } /* misc. 
*/ update_sta_info(padapter, psta); } return psta; } /* pnetwork: returns from rtw_joinbss_event_callback */ /* ptarget_wlan: found from scanned_queue */ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_network *ptarget_wlan, struct wlan_network *pnetwork) { struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct wlan_network *cur_network = &(pmlmepriv->cur_network); DBG_88E("%s\n", __func__); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nfw_state:%x, BSSID:%pM\n", get_fwstate(pmlmepriv), pnetwork->network.MacAddress)); /* why not use ptarget_wlan?? */ memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length); /* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */ cur_network->network.IELength = ptarget_wlan->network.IELength; memcpy(&cur_network->network.IEs[0], &ptarget_wlan->network.IEs[0], MAX_IE_SZ); cur_network->aid = pnetwork->join_res; rtw_set_signal_stat_timer(&padapter->recvpriv); padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength; padapter->recvpriv.signal_qual = ptarget_wlan->network.PhyInfo.SignalQuality; /* the ptarget_wlan->network.Rssi is raw data, we use ptarget_wlan->network.PhyInfo.SignalStrength instead (has scaled) */ padapter->recvpriv.rssi = translate_percentage_to_dbm(ptarget_wlan->network.PhyInfo.SignalStrength); rtw_set_signal_stat_timer(&padapter->recvpriv); /* update fw_state will clr _FW_UNDER_LINKING here indirectly */ switch (pnetwork->network.InfrastructureMode) { case Ndis802_11Infrastructure: if (pmlmepriv->fw_state&WIFI_UNDER_WPS) pmlmepriv->fw_state = WIFI_STATION_STATE|WIFI_UNDER_WPS; else pmlmepriv->fw_state = WIFI_STATION_STATE; break; case Ndis802_11IBSS: pmlmepriv->fw_state = WIFI_ADHOC_STATE; break; default: pmlmepriv->fw_state = WIFI_NULL_STATE; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Invalid network_mode\n")); break; } rtw_update_protection(padapter, (cur_network->network.IEs) + sizeof(struct ndis_802_11_fixed_ie), (cur_network->network.IELength)); rtw_update_ht_cap(padapter, cur_network->network.IEs, cur_network->network.IELength); } /* Notes: the function could be > passive_level (the same context as Rx tasklet) */ /* pnetwork: returns from rtw_joinbss_event_callback */ /* ptarget_wlan: found from scanned_queue */ /* if join_res > 0, for (fw_state == WIFI_STATION_STATE), we check if "ptarget_sta" & "ptarget_wlan" exist. */ /* if join_res > 0, for (fw_state == WIFI_ADHOC_STATE), we only check if "ptarget_wlan" exist. */ /* if join_res > 0, update "cur_network->network" from "pnetwork->network" if (ptarget_wlan != NULL). 
*/ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf) { u8 timer_cancelled; struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); struct wlan_network *pnetwork = (struct wlan_network *)pbuf; struct wlan_network *cur_network = &(pmlmepriv->cur_network); struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL; unsigned int the_same_macaddr = false; _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("joinbss event call back received with res=%d\n", pnetwork->join_res)); rtw_get_encrypt_decrypt_from_registrypriv(adapter); if (pmlmepriv->assoc_ssid.SsidLength == 0) RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n")); else RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid)); the_same_macaddr = _rtw_memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN); pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network); if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n ***joinbss_evt_callback return a wrong bss ***\n\n")); goto ignore_nolock; } spin_lock_bh(&pmlmepriv->lock); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nrtw_joinbss_event_callback!! _enter_critical\n")); if (pnetwork->join_res > 0) { spin_lock_bh(&(pmlmepriv->scanned_queue.lock)); if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) { /* s1. find ptarget_wlan */ if (check_fwstate(pmlmepriv, _FW_LINKED)) { if (the_same_macaddr) { ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress); } else { pcur_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress); if (pcur_wlan) pcur_wlan->fixed = false; pcur_sta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress); if (pcur_sta) { spin_lock_bh(&(pstapriv->sta_hash_lock)); rtw_free_stainfo(adapter, pcur_sta); spin_unlock_bh(&pstapriv->sta_hash_lock); } ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) { if (ptarget_wlan) ptarget_wlan->fixed = true; } } } else { ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) { if (ptarget_wlan) ptarget_wlan->fixed = true; } } /* s2. update cur_network */ if (ptarget_wlan) { rtw_joinbss_update_network(adapter, ptarget_wlan, pnetwork); } else { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't find ptarget_wlan when joinbss_event callback\n")); spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto ignore_joinbss_callback; } /* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) { ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork); if (ptarget_sta == NULL) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n")); spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto ignore_joinbss_callback; } } /* s4. indicate connect */ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) { rtw_indicate_connect(adapter); } else { /* adhoc mode will rtw_indicate_connect when rtw_stassoc_event_callback */ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv))); } /* s5. 
Cancle assoc_timer */ _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancle assoc_timer\n")); } else { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_joinbss_event_callback err: fw_state:%x", get_fwstate(pmlmepriv))); spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto ignore_joinbss_callback; } spin_unlock_bh(&pmlmepriv->scanned_queue.lock); } else if (pnetwork->join_res == -4) { rtw_reset_securitypriv(adapter); _set_timer(&pmlmepriv->assoc_timer, 1); if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == true) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("fail! clear _FW_UNDER_LINKING ^^^fw_state=%x\n", get_fwstate(pmlmepriv))); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); } } else { /* if join_res < 0 (join fails), then try again */ _set_timer(&pmlmepriv->assoc_timer, 1); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); } ignore_joinbss_callback: spin_unlock_bh(&pmlmepriv->lock); ignore_nolock: _func_exit_; } void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf) { struct wlan_network *pnetwork = (struct wlan_network *)pbuf; _func_enter_; mlmeext_joinbss_event_callback(adapter, pnetwork->join_res); rtw_os_xmit_schedule(adapter); _func_exit_; } static u8 search_max_mac_id(struct adapter *padapter) { u8 mac_id; #if defined (CONFIG_88EU_AP_MODE) u8 aid; struct mlme_priv *pmlmepriv = &(padapter->mlmepriv); struct sta_priv *pstapriv = &padapter->stapriv; #endif struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); #if defined (CONFIG_88EU_AP_MODE) if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { for (aid = (pstapriv->max_num_sta); aid > 0; aid--) { if (pstapriv->sta_aid[aid-1] != NULL) break; } mac_id = aid + 1; } else #endif {/* adhoc id = 31~2 */ for (mac_id = (NUM_STA-1); mac_id >= IBSS_START_MAC_ID; mac_id--) { if (pmlmeinfo->FW_sta_info[mac_id].status == 1) break; } } return mac_id; } /* FOR AP , AD-HOC mode */ void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta) { u16 media_status; u8 macid; if (psta == NULL) return; macid = search_max_mac_id(adapter); rtw_hal_set_hwreg(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid); media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE:1 connect */ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status); } void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf) { struct sta_info *psta; struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf; struct wlan_network *cur_network = &(pmlmepriv->cur_network); struct wlan_network *ptarget_wlan = NULL; _func_enter_; if (rtw_access_ctrl(adapter, pstassoc->macaddr) == false) return; #if defined (CONFIG_88EU_AP_MODE) if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr); if (psta) { ap_sta_info_defer_update(adapter, psta); rtw_stassoc_hw_rpt(adapter, psta); } goto exit; } #endif /* for AD-HOC mode */ psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr); if (psta != NULL) { /* the sta have been in sta_info_queue => do nothing */ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n")); goto exit; /* between drv has received this event before and fw have not yet to set key to CAM_ENTRY) */ } psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr); if (psta == NULL) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when 
rtw_stassoc_event_callback\n")); goto exit; } /* to do: init sta_info variable */ psta->qos_option = 0; psta->mac_id = (uint)pstassoc->cam_id; DBG_88E("%s\n", __func__); /* for ad-hoc mode */ rtw_hal_set_odm_var(adapter, HAL_ODM_STA_INFO, psta, true); rtw_stassoc_hw_rpt(adapter, psta); if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm; psta->ieee8021x_blocked = false; spin_lock_bh(&pmlmepriv->lock); if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) || (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))) { if (adapter->stapriv.asoc_sta_count == 2) { spin_lock_bh(&(pmlmepriv->scanned_queue.lock)); ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress); if (ptarget_wlan) ptarget_wlan->fixed = true; spin_unlock_bh(&pmlmepriv->scanned_queue.lock); /* a sta + bc/mc_stainfo (not Ibss_stainfo) */ rtw_indicate_connect(adapter); } } spin_unlock_bh(&pmlmepriv->lock); mlmeext_sta_add_event_callback(adapter, psta); exit: _func_exit_; } void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf) { int mac_id = -1; struct sta_info *psta; struct wlan_network *pwlan = NULL; struct wlan_bssid_ex *pdev_network = NULL; u8 *pibss = NULL; struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); struct stadel_event *pstadel = (struct stadel_event *)pbuf; struct sta_priv *pstapriv = &adapter->stapriv; struct wlan_network *tgt_network = &(pmlmepriv->cur_network); _func_enter_; psta = rtw_get_stainfo(&adapter->stapriv, pstadel->macaddr); if (psta) mac_id = psta->mac_id; else mac_id = pstadel->mac_id; DBG_88E("%s(mac_id=%d)=%pM\n", __func__, mac_id, pstadel->macaddr); if (mac_id >= 0) { u16 media_status; media_status = (mac_id<<8)|0; /* MACID|OPMODE:0 means disconnect */ /* for STA, AP, ADHOC mode, report disconnect stauts to FW */ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status); } if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) return; mlmeext_sta_del_event_callback(adapter); spin_lock_bh(&pmlmepriv->lock); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { if (pmlmepriv->to_roaming > 0) pmlmepriv->to_roaming--; /* this stadel_event is caused by roaming, decrease to_roaming */ else if (pmlmepriv->to_roaming == 0) pmlmepriv->to_roaming = adapter->registrypriv.max_roaming_times; if (*((unsigned short *)(pstadel->rsvd)) != WLAN_REASON_EXPIRATION_CHK) pmlmepriv->to_roaming = 0; /* don't roam */ rtw_free_uc_swdec_pending_queue(adapter); rtw_free_assoc_resources(adapter, 1); rtw_indicate_disconnect(adapter); spin_lock_bh(&(pmlmepriv->scanned_queue.lock)); /* remove the network entry in scanned_queue */ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress); if (pwlan) { pwlan->fixed = false; rtw_free_network_nolock(pmlmepriv, pwlan); } spin_unlock_bh(&pmlmepriv->scanned_queue.lock); _rtw_roaming(adapter, tgt_network); } if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { spin_lock_bh(&(pstapriv->sta_hash_lock)); rtw_free_stainfo(adapter, psta); spin_unlock_bh(&pstapriv->sta_hash_lock); if (adapter->stapriv.asoc_sta_count == 1) { /* a sta + bc/mc_stainfo (not Ibss_stainfo) */ spin_lock_bh(&(pmlmepriv->scanned_queue.lock)); /* free old ibss network */ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress); if (pwlan) { pwlan->fixed = false; rtw_free_network_nolock(pmlmepriv, pwlan); } spin_unlock_bh(&pmlmepriv->scanned_queue.lock); /* re-create ibss */ pdev_network 
= &(adapter->registrypriv.dev_network); pibss = adapter->registrypriv.dev_network.MacAddress; memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network)); _rtw_memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid)); rtw_update_registrypriv_dev_network(adapter); rtw_generate_random_ibss(pibss); if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { set_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE); _clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE); } if (rtw_createbss_cmd(adapter) != _SUCCESS) RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error=>stadel_event_callback: rtw_createbss_cmd status FAIL***\n ")); } } spin_unlock_bh(&pmlmepriv->lock); _func_exit_; } void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf) { _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_cpwm_event_callback !!!\n")); _func_exit_; } /* * _rtw_join_timeout_handler - Timeout/faliure handler for CMD JoinBss * @adapter: pointer to struct adapter structure */ void _rtw_join_timeout_handler (struct adapter *adapter) { struct mlme_priv *pmlmepriv = &adapter->mlmepriv; int do_join_r; _func_enter_; DBG_88E("%s, fw_state=%x\n", __func__, get_fwstate(pmlmepriv)); if (adapter->bDriverStopped || adapter->bSurpriseRemoved) return; spin_lock_bh(&pmlmepriv->lock); if (pmlmepriv->to_roaming > 0) { /* join timeout caused by roaming */ while (1) { pmlmepriv->to_roaming--; if (pmlmepriv->to_roaming != 0) { /* try another , */ DBG_88E("%s try another roaming\n", __func__); do_join_r = rtw_do_join(adapter); if (_SUCCESS != do_join_r) { DBG_88E("%s roaming do_join return %d\n", __func__ , do_join_r); continue; } break; } else { DBG_88E("%s We've try roaming but fail\n", __func__); rtw_indicate_disconnect(adapter); break; } } } else { rtw_indicate_disconnect(adapter); free_scanqueue(pmlmepriv);/* */ } spin_unlock_bh(&pmlmepriv->lock); _func_exit_; } /* * rtw_scan_timeout_handler - Timeout/Faliure handler for CMD SiteSurvey * @adapter: pointer to struct adapter structure */ void rtw_scan_timeout_handler (struct adapter *adapter) { struct mlme_priv *pmlmepriv = &adapter->mlmepriv; DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv)); spin_lock_bh(&pmlmepriv->lock); _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); spin_unlock_bh(&pmlmepriv->lock); rtw_indicate_scan_done(adapter, true); } static void rtw_auto_scan_handler(struct adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; /* auto site survey per 60sec */ if (pmlmepriv->scan_interval > 0) { pmlmepriv->scan_interval--; if (pmlmepriv->scan_interval == 0) { DBG_88E("%s\n", __func__); rtw_set_802_11_bssid_list_scan(padapter, NULL, 0); pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */ } } } void rtw_dynamic_check_timer_handlder(struct adapter *adapter) { struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct registry_priv *pregistrypriv = &adapter->registrypriv; if (!adapter) return; if (!adapter->hw_init_completed) return; if ((adapter->bDriverStopped) || (adapter->bSurpriseRemoved)) return; if (adapter->net_closed) return; rtw_dynamic_chk_wk_cmd(adapter); if (pregistrypriv->wifi_spec == 1) { #ifdef CONFIG_88EU_P2P struct wifidirect_info *pwdinfo = &adapter->wdinfo; if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) #endif { /* auto site survey */ rtw_auto_scan_handler(adapter); } } rcu_read_lock(); if (rcu_dereference(adapter->pnetdev->rx_handler_data) && (check_fwstate(pmlmepriv, 
WIFI_STATION_STATE|WIFI_ADHOC_STATE) == true)) { /* expire NAT2.5 entry */ nat25_db_expire(adapter); if (adapter->pppoe_connection_in_progress > 0) { adapter->pppoe_connection_in_progress--; } /* due to rtw_dynamic_check_timer_handlder() is called every 2 seconds */ if (adapter->pppoe_connection_in_progress > 0) { adapter->pppoe_connection_in_progress--; } } rcu_read_unlock(); } #define RTW_SCAN_RESULT_EXPIRE 2000 /* * Select a new join candidate from the original @param candidate and @param competitor * @return true: candidate is updated * @return false: candidate is not updated */ static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv , struct wlan_network **candidate, struct wlan_network *competitor) { int updated = false; struct adapter *adapter = container_of(pmlmepriv, struct adapter, mlmepriv); /* check bssid, if needed */ if (pmlmepriv->assoc_by_bssid) { if (!_rtw_memcmp(competitor->network.MacAddress, pmlmepriv->assoc_bssid, ETH_ALEN)) goto exit; } /* check ssid, if needed */ if (pmlmepriv->assoc_ssid.Ssid && pmlmepriv->assoc_ssid.SsidLength) { if (competitor->network.Ssid.SsidLength != pmlmepriv->assoc_ssid.SsidLength || _rtw_memcmp(competitor->network.Ssid.Ssid, pmlmepriv->assoc_ssid.Ssid, pmlmepriv->assoc_ssid.SsidLength) == false) goto exit; } if (rtw_is_desired_network(adapter, competitor) == false) goto exit; if (pmlmepriv->to_roaming) { if (rtw_get_passing_time_ms((u32)competitor->last_scanned) >= RTW_SCAN_RESULT_EXPIRE || is_same_ess(&competitor->network, &pmlmepriv->cur_network.network) == false) goto exit; } if (*candidate == NULL || (*candidate)->network.Rssi < competitor->network.Rssi) { *candidate = competitor; updated = true; } if (updated) { DBG_88E("[by_bssid:%u][assoc_ssid:%s]new candidate: %s(%pM rssi:%d\n", pmlmepriv->assoc_by_bssid, pmlmepriv->assoc_ssid.Ssid, (*candidate)->network.Ssid.Ssid, (*candidate)->network.MacAddress, (int)(*candidate)->network.Rssi); DBG_88E("[to_roaming:%u]\n", pmlmepriv->to_roaming); } exit: return updated; } /* Calling context: The caller of the sub-routine will be in critical section... 
The caller must hold the following spinlock pmlmepriv->lock */ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv) { int ret; struct list_head *phead; struct adapter *adapter; struct __queue *queue = &(pmlmepriv->scanned_queue); struct wlan_network *pnetwork = NULL; struct wlan_network *candidate = NULL; u8 supp_ant_div = false; _func_enter_; spin_lock_bh(&(pmlmepriv->scanned_queue.lock)); phead = get_list_head(queue); adapter = (struct adapter *)pmlmepriv->nic_hdl; pmlmepriv->pscanned = get_next(phead); while (!rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) { pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list); if (pnetwork == NULL) { RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork==NULL)\n", __func__)); ret = _FAIL; goto exit; } pmlmepriv->pscanned = get_next(pmlmepriv->pscanned); rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork); } if (candidate == NULL) { DBG_88E("%s: return _FAIL(candidate==NULL)\n", __func__); ret = _FAIL; goto exit; } else { DBG_88E("%s: candidate: %s(%pM ch:%u)\n", __func__, candidate->network.Ssid.Ssid, candidate->network.MacAddress, candidate->network.Configuration.DSConfig); } /* check for situation of _FW_LINKED */ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) { DBG_88E("%s: _FW_LINKED while ask_for_joinbss!!!\n", __func__); rtw_disassoc_cmd(adapter, 0, true); rtw_indicate_disconnect(adapter); rtw_free_assoc_resources(adapter, 0); } rtw_hal_get_def_var(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &(supp_ant_div)); if (supp_ant_div) { u8 cur_ant; rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(cur_ant)); DBG_88E("#### Opt_Ant_(%s), cur_Ant(%s)\n", (2 == candidate->network.PhyInfo.Optimum_antenna) ? "A" : "B", (2 == cur_ant) ? "A" : "B" ); } ret = rtw_joinbss_cmd(adapter, candidate); exit: spin_unlock_bh(&pmlmepriv->scanned_queue.lock); _func_exit_; return ret; } int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv) { struct cmd_obj *pcmd; struct setauth_parm *psetauthparm; struct cmd_priv *pcmdpriv = &(adapter->cmdpriv); int res = _SUCCESS; _func_enter_; pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj)); if (pcmd == NULL) { res = _FAIL; /* try again */ goto exit; } psetauthparm = (struct setauth_parm *)rtw_zmalloc(sizeof(struct setauth_parm)); if (psetauthparm == NULL) { kfree(pcmd); res = _FAIL; goto exit; } _rtw_memset(psetauthparm, 0, sizeof(struct setauth_parm)); psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm; pcmd->cmdcode = _SetAuth_CMD_; pcmd->parmbuf = (unsigned char *)psetauthparm; pcmd->cmdsz = (sizeof(struct setauth_parm)); pcmd->rsp = NULL; pcmd->rspsz = 0; _rtw_init_listhead(&pcmd->list); RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("after enqueue set_auth_cmd, auth_mode=%x\n", psecuritypriv->dot11AuthAlgrthm)); res = rtw_enqueue_cmd(pcmdpriv, pcmd); exit: _func_exit_; return res; } int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, int keyid, u8 set_tx) { u8 keylen; struct cmd_obj *pcmd; struct setkey_parm *psetkeyparm; struct cmd_priv *pcmdpriv = &(adapter->cmdpriv); struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); int res = _SUCCESS; _func_enter_; pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj)); if (pcmd == NULL) { res = _FAIL; /* try again */ goto exit; } psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm)); if (psetkeyparm == NULL) { kfree(pcmd); res = _FAIL; goto exit; } _rtw_memset(psetkeyparm, 0, sizeof(struct setkey_parm)); if 
(psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { psetkeyparm->algorithm = (unsigned char)psecuritypriv->dot118021XGrpPrivacy; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key: psetkeyparm->algorithm=(unsigned char)psecuritypriv->dot118021XGrpPrivacy=%d\n", psetkeyparm->algorithm)); } else { psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm; RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key: psetkeyparm->algorithm=(u8)psecuritypriv->dot11PrivacyAlgrthm=%d\n", psetkeyparm->algorithm)); } psetkeyparm->keyid = (u8)keyid;/* 0~3 */ psetkeyparm->set_tx = set_tx; pmlmepriv->key_mask |= BIT(psetkeyparm->keyid); DBG_88E("==> rtw_set_key algorithm(%x), keyid(%x), key_mask(%x)\n", psetkeyparm->algorithm, psetkeyparm->keyid, pmlmepriv->key_mask); RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key: psetkeyparm->algorithm=%d psetkeyparm->keyid=(u8)keyid=%d\n", psetkeyparm->algorithm, keyid)); switch (psetkeyparm->algorithm) { case _WEP40_: keylen = 5; memcpy(&(psetkeyparm->key[0]), &(psecuritypriv->dot11DefKey[keyid].skey[0]), keylen); break; case _WEP104_: keylen = 13; memcpy(&(psetkeyparm->key[0]), &(psecuritypriv->dot11DefKey[keyid].skey[0]), keylen); break; case _TKIP_: keylen = 16; memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen); psetkeyparm->grpkey = 1; break; case _AES_: keylen = 16; memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen); psetkeyparm->grpkey = 1; break; default: RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n rtw_set_key:psecuritypriv->dot11PrivacyAlgrthm=%x (must be 1 or 2 or 4 or 5)\n", psecuritypriv->dot11PrivacyAlgrthm)); res = _FAIL; goto exit; } pcmd->cmdcode = _SetKey_CMD_; pcmd->parmbuf = (u8 *)psetkeyparm; pcmd->cmdsz = (sizeof(struct setkey_parm)); pcmd->rsp = NULL; pcmd->rspsz = 0; _rtw_init_listhead(&pcmd->list); res = rtw_enqueue_cmd(pcmdpriv, pcmd); exit: _func_exit_; return res; } /* adjust IEs for rtw_joinbss_cmd in WMM */ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len) { unsigned int ielength = 0; unsigned int i, j; i = 12; /* after the fixed IE */ while (i < in_len) { ielength = initial_out_len; if (in_ie[i] == 0xDD && in_ie[i+2] == 0x00 && in_ie[i+3] == 0x50 && in_ie[i+4] == 0xF2 && in_ie[i+5] == 0x02 && i+5 < in_len) { /* WMM element ID and OUI */ /* Append WMM IE to the last index of out_ie */ for (j = i; j < i + 9; j++) { out_ie[ielength] = in_ie[j]; ielength++; } out_ie[initial_out_len + 1] = 0x07; out_ie[initial_out_len + 6] = 0x00; out_ie[initial_out_len + 8] = 0x00; break; } i += (in_ie[i+1]+2); /* to the next IE element */ } return ielength; } /* */ /* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */ /* Added by Annie, 2006-05-07. */ /* */ /* Search by BSSID, */ /* Return Value: */ /* -1 :if there is no pre-auth key in the table */ /* >= 0 :if there is pre-auth key, and return the entry id */ /* */ /* */ static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid) { struct security_priv *psecuritypriv = &Adapter->securitypriv; int i = 0; do { if ((psecuritypriv->PMKIDList[i].bUsed) && (_rtw_memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN) == true)) { break; } else { i++; /* continue; */ } } while (i < NUM_PMKID_CACHE); if (i == NUM_PMKID_CACHE) { i = -1;/* Could not find. */ } else { /* There is one Pre-Authentication Key for the specific BSSID. 
*/ } return i; } /* */ /* Check the RSN IE length */ /* If the RSN IE length <= 20, the RSN IE didn't include the PMKID information */ /* 0-11th element in the array are the fixed IE */ /* 12th element in the array is the IE */ /* 13th element in the array is the IE length */ /* */ static int rtw_append_pmkid(struct adapter *Adapter, int iEntry, u8 *ie, uint ie_len) { struct security_priv *psecuritypriv = &Adapter->securitypriv; if (ie[13] <= 20) { /* The RSN IE didn't include the PMK ID, append the PMK information */ ie[ie_len] = 1; ie_len++; ie[ie_len] = 0; /* PMKID count = 0x0100 */ ie_len++; memcpy(&ie[ie_len], &psecuritypriv->PMKIDList[iEntry].PMKID, 16); ie_len += 16; ie[13] += 18;/* PMKID length = 2+16 */ } return ie_len; } int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len) { u8 authmode; uint ielength; int iEntry; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct security_priv *psecuritypriv = &adapter->securitypriv; uint ndisauthmode = psecuritypriv->ndisauthtype; uint ndissecuritytype = psecuritypriv->ndisencryptstatus; _func_enter_; RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_restruct_sec_ie: ndisauthmode=%d ndissecuritytype=%d\n", ndisauthmode, ndissecuritytype)); /* copy fixed ie only */ memcpy(out_ie, in_ie, 12); ielength = 12; if ((ndisauthmode == Ndis802_11AuthModeWPA) || (ndisauthmode == Ndis802_11AuthModeWPAPSK)) authmode = _WPA_IE_ID_; if ((ndisauthmode == Ndis802_11AuthModeWPA2) || (ndisauthmode == Ndis802_11AuthModeWPA2PSK)) authmode = _WPA2_IE_ID_; if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) { memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len); ielength += psecuritypriv->wps_ie_len; } else if ((authmode == _WPA_IE_ID_) || (authmode == _WPA2_IE_ID_)) { /* copy RSN or SSN */ memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1]+2); ielength += psecuritypriv->supplicant_ie[1]+2; rtw_report_sec_ie(adapter, authmode, psecuritypriv->supplicant_ie); } iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid); if (iEntry < 0) { return ielength; } else { if (authmode == _WPA2_IE_ID_) ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength); } _func_exit_; return ielength; } void rtw_init_registrypriv_dev_network(struct adapter *adapter) { struct registry_priv *pregistrypriv = &adapter->registrypriv; struct eeprom_priv *peepriv = &adapter->eeprompriv; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; u8 *myhwaddr = myid(peepriv); _func_enter_; memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN); memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid)); pdev_network->Configuration.Length = sizeof(struct ndis_802_11_config); pdev_network->Configuration.BeaconPeriod = 100; pdev_network->Configuration.FHConfig.Length = 0; pdev_network->Configuration.FHConfig.HopPattern = 0; pdev_network->Configuration.FHConfig.HopSet = 0; pdev_network->Configuration.FHConfig.DwellTime = 0; _func_exit_; } void rtw_update_registrypriv_dev_network(struct adapter *adapter) { int sz = 0; struct registry_priv *pregistrypriv = &adapter->registrypriv; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; struct security_priv *psecuritypriv = &adapter->securitypriv; struct wlan_network *cur_network = &adapter->mlmepriv.cur_network; _func_enter_; pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 
1 : 0); /* adhoc no 802.1x */ pdev_network->Rssi = 0; switch (pregistrypriv->wireless_mode) { case WIRELESS_11B: pdev_network->NetworkTypeInUse = (Ndis802_11DS); break; case WIRELESS_11G: case WIRELESS_11BG: case WIRELESS_11_24N: case WIRELESS_11G_24N: case WIRELESS_11BG_24N: pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24); break; case WIRELESS_11A: case WIRELESS_11A_5N: pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5); break; case WIRELESS_11ABGN: if (pregistrypriv->channel > 14) pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5); else pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24); break; default: /* TODO */ break; } pdev_network->Configuration.DSConfig = (pregistrypriv->channel); RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("pregistrypriv->channel=%d, pdev_network->Configuration.DSConfig=0x%x\n", pregistrypriv->channel, pdev_network->Configuration.DSConfig)); if (cur_network->network.InfrastructureMode == Ndis802_11IBSS) pdev_network->Configuration.ATIMWindow = (0); pdev_network->InfrastructureMode = (cur_network->network.InfrastructureMode); /* 1. Supported rates */ /* 2. IE */ sz = rtw_generate_ie(pregistrypriv); pdev_network->IELength = sz; pdev_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network); /* notes: translate IELength & Length after assign the Length to cmdsz in createbss_cmd(); */ /* pdev_network->IELength = cpu_to_le32(sz); */ _func_exit_; } void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter) { _func_enter_; _func_exit_; } /* the function is at passive_level */ void rtw_joinbss_reset(struct adapter *padapter) { u8 threshold; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; /* todo: if you want to do something io/reg/hw setting before join_bss, please add code here */ pmlmepriv->num_FortyMHzIntolerant = 0; pmlmepriv->num_sta_no_ht = 0; phtpriv->ampdu_enable = false;/* reset to disabled */ /* TH = 1 => means that invalidate usb rx aggregation */ /* TH = 0 => means that validate usb rx aggregation, use init value. 
*/ if (phtpriv->ht_option) { if (padapter->registrypriv.wifi_spec == 1) threshold = 1; else threshold = 0; rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold)); } else { threshold = 1; rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold)); } } /* the function is >= passive_level */ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len) { u32 ielen, out_len; enum ht_cap_ampdu_factor max_rx_ampdu_factor; unsigned char *p; struct rtw_ieee80211_ht_cap ht_capie; unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00}; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; u32 rx_packet_offset, max_recvbuf_sz; phtpriv->ht_option = false; p = rtw_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12); if (p && ielen > 0) { if (pqospriv->qos_option == 0) { out_len = *pout_len; rtw_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_, _WMM_IE_Length_, WMM_IE, pout_len); pqospriv->qos_option = 1; } out_len = *pout_len; _rtw_memset(&ht_capie, 0, sizeof(struct rtw_ieee80211_ht_cap)); ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_DSSSCCK40; rtw_hal_get_def_var(padapter, HAL_DEF_RX_PACKET_OFFSET, &rx_packet_offset); rtw_hal_get_def_var(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz); /* AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k AMPDU_para [4:2]:Min MPDU Start Spacing */ rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor); ht_capie.ampdu_params_info = (max_rx_ampdu_factor&0x03); if (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_) ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2)); else ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00); rtw_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_, sizeof(struct rtw_ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len); phtpriv->ht_option = true; p = rtw_get_ie(in_ie+12, _HT_ADD_INFO_IE_, &ielen, in_len-12); if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) { out_len = *pout_len; rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2 , pout_len); } } return phtpriv->ht_option; } /* the function is > passive_level (in critical_section) */ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len) { u8 *p, max_ampdu_sz; int len; struct rtw_ieee80211_ht_cap *pht_capie; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; struct registry_priv *pregistrypriv = &padapter->registrypriv; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); if (!phtpriv->ht_option) return; if ((!pmlmeinfo->HT_info_enable) || (!pmlmeinfo->HT_caps_enable)) return; DBG_88E("+rtw_update_ht_cap()\n"); /* maybe needs check if ap supports rx ampdu. 
*/ if ((!phtpriv->ampdu_enable) && (pregistrypriv->ampdu_enable == 1)) { if (pregistrypriv->wifi_spec == 1) phtpriv->ampdu_enable = false; else phtpriv->ampdu_enable = true; } else if (pregistrypriv->ampdu_enable == 2) { phtpriv->ampdu_enable = true; } /* check Max Rx A-MPDU Size */ len = 0; p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fixed_ie), _HT_CAPABILITY_IE_, &len, ie_len-sizeof(struct ndis_802_11_fixed_ie)); if (p && len > 0) { pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2); max_ampdu_sz = (pht_capie->ampdu_params_info & IEEE80211_HT_CAP_AMPDU_FACTOR); max_ampdu_sz = 1 << (max_ampdu_sz+3); /* max_ampdu_sz (kbytes); */ phtpriv->rx_ampdu_maxlen = max_ampdu_sz; } len = 0; p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fixed_ie), _HT_ADD_INFO_IE_, &len, ie_len-sizeof(struct ndis_802_11_fixed_ie)); /* update cur_bwmode & cur_ch_offset */ if ((pregistrypriv->cbw40_enable) && (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & BIT(1)) && (pmlmeinfo->HT_info.infos[0] & BIT(2))) { int i; u8 rf_type; padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type)); /* update the MCS rates */ for (i = 0; i < 16; i++) { if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R)) pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i]; else pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i]; } /* switch to the 40M Hz mode according to the AP */ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40; switch ((pmlmeinfo->HT_info.infos[0] & 0x3)) { case HT_EXTCHNL_OFFSET_UPPER: pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER; break; case HT_EXTCHNL_OFFSET_LOWER: pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER; break; default: pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE; break; } } /* Config SM Power Save setting */ pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & 0x0C) >> 2; if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC) DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__); /* Config current HT Protection mode. 
*/ pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3; } void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitframe) { u8 issued; int priority; struct sta_info *psta = NULL; struct ht_priv *phtpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; s32 bmcst = IS_MCAST(pattrib->ra); if (bmcst || (padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100)) return; priority = pattrib->priority; if (pattrib->psta) psta = pattrib->psta; else psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra); if (psta == NULL) return; phtpriv = &psta->htpriv; if ((phtpriv->ht_option) && (phtpriv->ampdu_enable)) { issued = (phtpriv->agg_enable_bitmap>>priority)&0x1; issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1; if (0 == issued) { DBG_88E("rtw_issue_addbareq_cmd, p=%d\n", priority); psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority); rtw_addbareq_cmd(padapter, (u8) priority, pattrib->ra); } } } void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; spin_lock_bh(&pmlmepriv->lock); _rtw_roaming(padapter, tgt_network); spin_unlock_bh(&pmlmepriv->lock); } void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; int do_join_r; struct wlan_network *pnetwork; if (tgt_network != NULL) pnetwork = tgt_network; else pnetwork = &pmlmepriv->cur_network; if (0 < pmlmepriv->to_roaming) { DBG_88E("roaming from %s(%pM length:%d\n", pnetwork->network.Ssid.Ssid, pnetwork->network.MacAddress, pnetwork->network.Ssid.SsidLength); memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid)); pmlmepriv->assoc_by_bssid = false; while (1) { do_join_r = rtw_do_join(padapter); if (_SUCCESS == do_join_r) { break; } else { DBG_88E("roaming do_join return %d\n", do_join_r); pmlmepriv->to_roaming--; if (0 < pmlmepriv->to_roaming) { continue; } else { DBG_88E("%s(%d) -to roaming fail, indicate_disconnect\n", __func__, __LINE__); rtw_indicate_disconnect(padapter); break; } } } } }
gpl-2.0
shobhitka/linux-kernel
drivers/gpu/drm/virtio/virtgpu_fb.c
249
11679
/* * Copyright (C) 2015 Red Hat, Inc. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <drm/drmP.h> #include <drm/drm_fb_helper.h> #include "virtgpu_drv.h" #define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60) struct virtio_gpu_fbdev { struct drm_fb_helper helper; struct virtio_gpu_framebuffer vgfb; struct virtio_gpu_device *vgdev; struct delayed_work work; }; static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb, bool store, int x, int y, int width, int height) { struct drm_device *dev = fb->base.dev; struct virtio_gpu_device *vgdev = dev->dev_private; bool store_for_later = false; int bpp = fb->base.bits_per_pixel / 8; int x2, y2; unsigned long flags; struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj); if ((width <= 0) || (x + width > fb->base.width) || (y + height > fb->base.height)) { DRM_DEBUG("values out of range %dx%d+%d+%d, fb %dx%d\n", width, height, x, y, fb->base.width, fb->base.height); return -EINVAL; } /* * Can be called with pretty much any context (console output * path). If we are in atomic just store the dirty rect info * to send out the update later. * * Can't test inside spin lock. 
*/ if (in_atomic() || store) store_for_later = true; x2 = x + width - 1; y2 = y + height - 1; spin_lock_irqsave(&fb->dirty_lock, flags); if (fb->y1 < y) y = fb->y1; if (fb->y2 > y2) y2 = fb->y2; if (fb->x1 < x) x = fb->x1; if (fb->x2 > x2) x2 = fb->x2; if (store_for_later) { fb->x1 = x; fb->x2 = x2; fb->y1 = y; fb->y2 = y2; spin_unlock_irqrestore(&fb->dirty_lock, flags); return 0; } fb->x1 = fb->y1 = INT_MAX; fb->x2 = fb->y2 = 0; spin_unlock_irqrestore(&fb->dirty_lock, flags); { uint32_t offset; uint32_t w = x2 - x + 1; uint32_t h = y2 - y + 1; offset = (y * fb->base.pitches[0]) + x * bpp; virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle, offset, cpu_to_le32(w), cpu_to_le32(h), cpu_to_le32(x), cpu_to_le32(y), NULL); } virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle, x, y, x2 - x + 1, y2 - y + 1); return 0; } int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb, struct drm_clip_rect *clips, unsigned num_clips) { struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private; struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj); struct drm_clip_rect norect; struct drm_clip_rect *clips_ptr; int left, right, top, bottom; int i; int inc = 1; if (!num_clips) { num_clips = 1; clips = &norect; norect.x1 = norect.y1 = 0; norect.x2 = vgfb->base.width; norect.y2 = vgfb->base.height; } left = clips->x1; right = clips->x2; top = clips->y1; bottom = clips->y2; /* skip the first clip rect */ for (i = 1, clips_ptr = clips + inc; i < num_clips; i++, clips_ptr += inc) { left = min_t(int, left, (int)clips_ptr->x1); right = max_t(int, right, (int)clips_ptr->x2); top = min_t(int, top, (int)clips_ptr->y1); bottom = max_t(int, bottom, (int)clips_ptr->y2); } if (obj->dumb) return virtio_gpu_dirty_update(vgfb, false, left, top, right - left, bottom - top); virtio_gpu_cmd_resource_flush(vgdev, obj->hw_res_handle, left, top, right - left, bottom - top); return 0; } static void virtio_gpu_fb_dirty_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct virtio_gpu_fbdev *vfbdev = container_of(delayed_work, struct virtio_gpu_fbdev, work); struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb; virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1, vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1); } static void virtio_gpu_3d_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct virtio_gpu_fbdev *vfbdev = info->par; drm_fb_helper_sys_fillrect(info, rect); virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy, rect->width, rect->height); schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); } static void virtio_gpu_3d_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct virtio_gpu_fbdev *vfbdev = info->par; drm_fb_helper_sys_copyarea(info, area); virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy, area->width, area->height); schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); } static void virtio_gpu_3d_imageblit(struct fb_info *info, const struct fb_image *image) { struct virtio_gpu_fbdev *vfbdev = info->par; drm_fb_helper_sys_imageblit(info, image); virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy, image->width, image->height); schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD); } static struct fb_ops virtio_gpufb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */ .fb_fillrect = virtio_gpu_3d_fillrect, .fb_copyarea = 
virtio_gpu_3d_copyarea, .fb_imageblit = virtio_gpu_3d_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, .fb_debug_enter = drm_fb_helper_debug_enter, .fb_debug_leave = drm_fb_helper_debug_leave, }; static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *obj) { return virtio_gpu_object_kmap(obj, NULL); } static int virtio_gpufb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct virtio_gpu_fbdev *vfbdev = container_of(helper, struct virtio_gpu_fbdev, helper); struct drm_device *dev = helper->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd = {}; struct virtio_gpu_object *obj; uint32_t resid, format, size; int ret; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; mode_cmd.pitches[0] = mode_cmd.width * 4; mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24); switch (mode_cmd.pixel_format) { #ifdef __BIG_ENDIAN case DRM_FORMAT_XRGB8888: format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM; break; case DRM_FORMAT_ARGB8888: format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM; break; case DRM_FORMAT_BGRX8888: format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; break; case DRM_FORMAT_BGRA8888: format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM; break; case DRM_FORMAT_RGBX8888: format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM; break; case DRM_FORMAT_RGBA8888: format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM; break; case DRM_FORMAT_XBGR8888: format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM; break; case DRM_FORMAT_ABGR8888: format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM; break; #else case DRM_FORMAT_XRGB8888: format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; break; case DRM_FORMAT_ARGB8888: format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM; break; case DRM_FORMAT_BGRX8888: format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM; break; case DRM_FORMAT_BGRA8888: format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM; break; case DRM_FORMAT_RGBX8888: format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM; break; case DRM_FORMAT_RGBA8888: format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM; break; case DRM_FORMAT_XBGR8888: format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM; break; case DRM_FORMAT_ABGR8888: format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM; break; #endif default: DRM_ERROR("failed to find virtio gpu format for %d\n", mode_cmd.pixel_format); return -EINVAL; } size = mode_cmd.pitches[0] * mode_cmd.height; obj = virtio_gpu_alloc_object(dev, size, false, true); if (IS_ERR(obj)) return PTR_ERR(obj); virtio_gpu_resource_id_get(vgdev, &resid); virtio_gpu_cmd_create_resource(vgdev, resid, format, mode_cmd.width, mode_cmd.height); ret = virtio_gpu_vmap_fb(vgdev, obj); if (ret) { DRM_ERROR("failed to vmap fb %d\n", ret); goto err_obj_vmap; } /* attach the object to the resource */ ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL); if (ret) goto err_obj_attach; info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { ret = PTR_ERR(info); goto err_fb_alloc; } info->par = helper; ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb, &mode_cmd, &obj->gem_base); if (ret) goto err_fb_init; fb = &vfbdev->vgfb.base; vfbdev->helper.fb = fb; strcpy(info->fix.id, "virtiodrmfb"); info->flags = FBINFO_DEFAULT; info->fbops = &virtio_gpufb_ops; info->pixmap.flags = FB_PIXMAP_SYSTEM; info->screen_base = obj->vmap; info->screen_size = obj->gem_base.size; drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, &vfbdev->helper, sizes->fb_width, 
sizes->fb_height); info->fix.mmio_start = 0; info->fix.mmio_len = 0; return 0; err_fb_init: drm_fb_helper_release_fbi(helper); err_fb_alloc: virtio_gpu_cmd_resource_inval_backing(vgdev, resid); err_obj_attach: err_obj_vmap: virtio_gpu_gem_free_object(&obj->gem_base); return ret; } static int virtio_gpu_fbdev_destroy(struct drm_device *dev, struct virtio_gpu_fbdev *vgfbdev) { struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb; drm_fb_helper_unregister_fbi(&vgfbdev->helper); drm_fb_helper_release_fbi(&vgfbdev->helper); if (vgfb->obj) vgfb->obj = NULL; drm_fb_helper_fini(&vgfbdev->helper); drm_framebuffer_cleanup(&vgfb->base); return 0; } static struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = { .fb_probe = virtio_gpufb_create, }; int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev) { struct virtio_gpu_fbdev *vgfbdev; int bpp_sel = 32; /* TODO: parameter from somewhere? */ int ret; vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL); if (!vgfbdev) return -ENOMEM; vgfbdev->vgdev = vgdev; vgdev->vgfbdev = vgfbdev; INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work); drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper, &virtio_gpu_fb_helper_funcs); ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper, vgdev->num_scanouts, VIRTIO_GPUFB_CONN_LIMIT); if (ret) { kfree(vgfbdev); return ret; } drm_fb_helper_single_add_all_connectors(&vgfbdev->helper); drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel); return 0; } void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev) { if (!vgdev->vgfbdev) return; virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev); kfree(vgdev->vgfbdev); vgdev->vgfbdev = NULL; }
gpl-2.0
Wenzel/kvm
drivers/gpu/drm/qxl/qxl_drv.c
249
7848
/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */ /* qxl_drv.c -- QXL driver -*- linux-c -*- * * Copyright 2011 Red Hat, Inc. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Dave Airlie <airlie@redhat.com> * Alon Levy <alevy@redhat.com> */ #include <linux/module.h> #include <linux/console.h> #include "drmP.h" #include "drm/drm.h" #include "drm_crtc_helper.h" #include "qxl_drv.h" #include "qxl_object.h" extern int qxl_max_ioctls; static const struct pci_device_id pciidlist[] = { { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0 }, { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8, 0xffff00, 0 }, { 0, 0, 0 }, }; MODULE_DEVICE_TABLE(pci, pciidlist); static int qxl_modeset = -1; int qxl_num_crtc = 4; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, qxl_modeset, int, 0400); MODULE_PARM_DESC(num_heads, "Number of virtual crtcs to expose (default 4)"); module_param_named(num_heads, qxl_num_crtc, int, 0400); static struct drm_driver qxl_driver; static struct pci_driver qxl_pci_driver; static int qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { if (pdev->revision < 4) { DRM_ERROR("qxl too old, doesn't support client_monitors_config," " use xf86-video-qxl in user mode"); return -EINVAL; /* TODO: ENODEV ? 
*/ } return drm_get_pci_dev(pdev, ent, &qxl_driver); } static void qxl_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); drm_put_dev(dev); } static const struct file_operations qxl_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, .poll = drm_poll, .read = drm_read, .mmap = qxl_mmap, }; static int qxl_drm_freeze(struct drm_device *dev) { struct pci_dev *pdev = dev->pdev; struct qxl_device *qdev = dev->dev_private; struct drm_crtc *crtc; drm_kms_helper_poll_disable(dev); console_lock(); qxl_fbdev_set_suspend(qdev, 1); console_unlock(); /* unpin the front buffers */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc->enabled) (*crtc_funcs->disable)(crtc); } qxl_destroy_monitors_object(qdev); qxl_surf_evict(qdev); qxl_vram_evict(qdev); while (!qxl_check_idle(qdev->command_ring)); while (!qxl_check_idle(qdev->release_ring)) qxl_queue_garbage_collect(qdev, 1); pci_save_state(pdev); return 0; } static int qxl_drm_resume(struct drm_device *dev, bool thaw) { struct qxl_device *qdev = dev->dev_private; qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; if (!thaw) { qxl_reinit_memslots(qdev); qxl_ring_init_hdr(qdev->release_ring); } qxl_create_monitors_object(qdev); drm_helper_resume_force_mode(dev); console_lock(); qxl_fbdev_set_suspend(qdev, 0); console_unlock(); drm_kms_helper_poll_enable(dev); return 0; } static int qxl_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); int error; error = qxl_drm_freeze(drm_dev); if (error) return error; pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int qxl_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); if (pci_enable_device(pdev)) { return -EIO; } return qxl_drm_resume(drm_dev, false); } static int qxl_pm_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); return qxl_drm_resume(drm_dev, true); } static int qxl_pm_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); return qxl_drm_freeze(drm_dev); } static int qxl_pm_restore(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); struct qxl_device *qdev = drm_dev->dev_private; qxl_io_reset(qdev); return qxl_drm_resume(drm_dev, false); } static u32 qxl_noop_get_vblank_counter(struct drm_device *dev, unsigned int pipe) { return 0; } static int qxl_noop_enable_vblank(struct drm_device *dev, unsigned int pipe) { return 0; } static void qxl_noop_disable_vblank(struct drm_device *dev, unsigned int pipe) { } static const struct dev_pm_ops qxl_pm_ops = { .suspend = qxl_pm_suspend, .resume = qxl_pm_resume, .freeze = qxl_pm_freeze, .thaw = qxl_pm_thaw, .poweroff = qxl_pm_freeze, .restore = qxl_pm_restore, }; static struct pci_driver qxl_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, .probe = qxl_pci_probe, .remove = qxl_pci_remove, .driver.pm = &qxl_pm_ops, }; static struct drm_driver qxl_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .load = qxl_driver_load, .unload = qxl_driver_unload, .get_vblank_counter = qxl_noop_get_vblank_counter, 
.enable_vblank = qxl_noop_enable_vblank, .disable_vblank = qxl_noop_disable_vblank, .set_busid = drm_pci_set_busid, .dumb_create = qxl_mode_dumb_create, .dumb_map_offset = qxl_mode_dumb_mmap, .dumb_destroy = drm_gem_dumb_destroy, #if defined(CONFIG_DEBUG_FS) .debugfs_init = qxl_debugfs_init, .debugfs_cleanup = qxl_debugfs_takedown, #endif .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = qxl_gem_prime_pin, .gem_prime_unpin = qxl_gem_prime_unpin, .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, .gem_prime_vmap = qxl_gem_prime_vmap, .gem_prime_vunmap = qxl_gem_prime_vunmap, .gem_prime_mmap = qxl_gem_prime_mmap, .gem_free_object = qxl_gem_object_free, .gem_open_object = qxl_gem_object_open, .gem_close_object = qxl_gem_object_close, .fops = &qxl_fops, .ioctls = qxl_ioctls, .irq_handler = qxl_irq_handler, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = 0, .minor = 1, .patchlevel = 0, }; static int __init qxl_init(void) { #ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && qxl_modeset == -1) return -EINVAL; #endif if (qxl_modeset == 0) return -EINVAL; qxl_driver.num_ioctls = qxl_max_ioctls; return drm_pci_init(&qxl_driver, &qxl_pci_driver); } static void __exit qxl_exit(void) { drm_pci_exit(&qxl_driver, &qxl_pci_driver); } module_init(qxl_init); module_exit(qxl_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
ench0/android_kernel_samsung_hltez
net/ipv6/icmp.c
505
23210
/* * Internet Control Message Protocol (ICMPv6) * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on net/ipv4/icmp.c * * RFC 1885 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Changes: * * Andi Kleen : exception handling * Andi Kleen add rate limits. never reply to a icmp. * add more length checks and other fixes. * yoshfuji : ensure to sent parameter problem for * fragments. * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit. * Randy Dunlap and * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data */ #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/netfilter.h> #include <linux/slab.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/icmpv6.h> #include <net/ip.h> #include <net/sock.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> #include <net/ping.h> #include <net/protocol.h> #include <net/raw.h> #include <net/rawv6.h> #include <net/transp_v6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/inet_common.h> #include <asm/uaccess.h> /* * The ICMP socket(s). This is the most convenient way to flow control * our ICMP output as well as maintain a clean interface throughout * all layers. All Socketless IP sends will soon be gone. * * On SMP we have one ICMP socket per-cpu. */ static inline struct sock *icmpv6_sk(struct net *net) { return net->ipv6.icmp_sk[smp_processor_id()]; } static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { /* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */ struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset); if (!(type & ICMPV6_INFOMSG_MASK)) if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST) ping_err(skb, offset, info); } static int icmpv6_rcv(struct sk_buff *skb); static const struct inet6_protocol icmpv6_protocol = { .handler = icmpv6_rcv, .err_handler = icmpv6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; static __inline__ struct sock *icmpv6_xmit_lock(struct net *net) { struct sock *sk; local_bh_disable(); sk = icmpv6_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path (f.e. SIT or * ip6ip6 tunnel) signals dst_link_failure() for an * outgoing ICMP6 packet. */ local_bh_enable(); return NULL; } return sk; } static __inline__ void icmpv6_xmit_unlock(struct sock *sk) { spin_unlock_bh(&sk->sk_lock.slock); } /* * Slightly more convenient version of icmpv6_send. */ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos); kfree_skb(skb); } /* * Figure out, may we reply to this packet with icmp error. * * We do not reply, if: * - it was icmp error message. * - it is truncated, so that it is known, that protocol is ICMPV6 * (i.e. 
in the middle of some exthdr) * * --ANK (980726) */ static int is_ineligible(struct sk_buff *skb) { int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data; int len = skb->len - ptr; __u8 nexthdr = ipv6_hdr(skb)->nexthdr; __be16 frag_off; if (len < 0) return 1; ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off); if (ptr < 0) return 0; if (nexthdr == IPPROTO_ICMPV6) { u8 _type, *tp; tp = skb_header_pointer(skb, ptr+offsetof(struct icmp6hdr, icmp6_type), sizeof(_type), &_type); if (tp == NULL || !(*tp & ICMPV6_INFOMSG_MASK)) return 1; } return 0; } /* * Check the ICMP output rate limit */ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type, struct flowi6 *fl6) { struct dst_entry *dst; struct net *net = sock_net(sk); bool res = false; /* Informational messages are not limited. */ if (type & ICMPV6_INFOMSG_MASK) return true; /* Do not limit pmtu discovery, it would break it. */ if (type == ICMPV6_PKT_TOOBIG) return true; /* * Look up the output route. * XXX: perhaps the expire for routing entries cloned by * this lookup should be more aggressive (not longer than timeout). */ dst = ip6_route_output(net, sk, fl6); if (dst->error) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); } else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) { res = true; } else { struct rt6_info *rt = (struct rt6_info *)dst; int tmo = net->ipv6.sysctl.icmpv6_time; /* Give more bandwidth to wider prefixes. */ if (rt->rt6i_dst.plen < 128) tmo >>= ((128 - rt->rt6i_dst.plen)>>5); if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo); } dst_release(dst); return res; } /* * an inline helper for the "simple" if statement below * checks if parameter problem report is caused by an * unrecognized IPv6 option that has the Option Type * highest-order two bits set to 10 */ static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset) { u8 _optval, *op; offset += skb_network_offset(skb); op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval); if (op == NULL) return 1; return (*op & 0xC0) == 0x80; } int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, struct icmp6hdr *thdr, int len) { struct sk_buff *skb; struct icmp6hdr *icmp6h; int err = 0; if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; icmp6h = icmp6_hdr(skb); memcpy(icmp6h, thdr, sizeof(struct icmp6hdr)); icmp6h->icmp6_cksum = 0; if (skb_queue_len(&sk->sk_write_queue) == 1) { skb->csum = csum_partial(icmp6h, sizeof(struct icmp6hdr), skb->csum); icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, len, fl6->flowi6_proto, skb->csum); } else { __wsum tmp_csum = 0; skb_queue_walk(&sk->sk_write_queue, skb) { tmp_csum = csum_add(tmp_csum, skb->csum); } tmp_csum = csum_partial(icmp6h, sizeof(struct icmp6hdr), tmp_csum); icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr, len, fl6->flowi6_proto, tmp_csum); } ip6_push_pending_frames(sk); out: return err; } struct icmpv6_msg { struct sk_buff *skb; int offset; uint8_t type; }; static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct icmpv6_msg *msg = (struct icmpv6_msg *) from; struct sk_buff *org_skb = msg->skb; __wsum csum = 0; csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset, to, len, csum); skb->csum = csum_block_add(skb->csum, csum, odd); if (!(msg->type & ICMPV6_INFOMSG_MASK)) nf_ct_attach(skb, org_skb); return 0; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) static void mip6_addr_swap(struct sk_buff *skb) { struct ipv6hdr *iph = 
ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct ipv6_destopt_hao *hao; struct in6_addr tmp; int off; if (opt->dsthao) { off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO); if (likely(off >= 0)) { hao = (struct ipv6_destopt_hao *) (skb_network_header(skb) + off); tmp = iph->saddr; iph->saddr = hao->addr; hao->addr = tmp; } } } #else static inline void mip6_addr_swap(struct sk_buff *skb) {} #endif struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb, struct sock *sk, struct flowi6 *fl6) { struct dst_entry *dst, *dst2; struct flowi6 fl2; int err; err = ip6_dst_lookup(sk, &dst, fl6); if (err) return ERR_PTR(err); /* * We won't send icmp if the destination is known * anycast. */ if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) { LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n"); dst_release(dst); return ERR_PTR(-EINVAL); } /* No need to clone since we're just using its address. */ dst2 = dst; dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0); if (!IS_ERR(dst)) { if (dst != dst2) return dst; } else { if (PTR_ERR(dst) == -EPERM) dst = NULL; else return dst; } err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6); if (err) goto relookup_failed; err = ip6_dst_lookup(sk, &dst2, &fl2); if (err) goto relookup_failed; dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP); if (!IS_ERR(dst2)) { dst_release(dst); dst = dst2; } else { err = PTR_ERR(dst2); if (err == -EPERM) { dst_release(dst); return dst2; } else goto relookup_failed; } relookup_failed: if (dst) return dst; return ERR_PTR(err); } /* * Send an ICMP message in response to a packet in error */ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { struct net *net = dev_net(skb->dev); struct inet6_dev *idev = NULL; struct ipv6hdr *hdr = ipv6_hdr(skb); struct sock *sk; struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; struct dst_entry *dst; struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; int iif = 0; int addr_type = 0; int len; int hlimit; int err = 0; u32 mark = IP6_REPLY_MARK(net, skb->mark); if ((u8 *)hdr < skb->head || (skb->network_header + sizeof(*hdr)) > skb->tail) return; /* * Make sure we respect the rules * i.e. RFC 1885 2.4(e) * Rule (e.1) is enforced by not using icmpv6_send * in any code that processes icmp errors. */ addr_type = ipv6_addr_type(&hdr->daddr); if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0)) saddr = &hdr->daddr; /* * Dest addr check */ if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) { if (type != ICMPV6_PKT_TOOBIG && !(type == ICMPV6_PARAMPROB && code == ICMPV6_UNK_OPTION && (opt_unrec(skb, info)))) return; saddr = NULL; } addr_type = ipv6_addr_type(&hdr->saddr); /* * Source addr check */ if (addr_type & IPV6_ADDR_LINKLOCAL) iif = skb->dev->ifindex; /* * Must not send error if the source does not uniquely * identify a single node (RFC2463 Section 2.4). * We check unspecified / multicast addresses here, * and anycast addresses will be checked later. */ if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) { LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n"); return; } /* * Never answer to a ICMP packet. 
*/ if (is_ineligible(skb)) { LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n"); return; } mip6_addr_swap(skb); memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; fl6.daddr = hdr->saddr; if (saddr) fl6.saddr = *saddr; fl6.flowi6_mark = mark; fl6.flowi6_oif = iif; fl6.fl6_icmp_type = type; fl6.fl6_icmp_code = code; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); sk = icmpv6_xmit_lock(net); if (sk == NULL) return; sk->sk_mark = mark; np = inet6_sk(sk); if (!icmpv6_xrlim_allow(sk, type, &fl6)) goto out; tmp_hdr.icmp6_type = type; tmp_hdr.icmp6_code = code; tmp_hdr.icmp6_cksum = 0; tmp_hdr.icmp6_pointer = htonl(info); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = np->mcast_oif; else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; dst = icmpv6_route_lookup(net, skb, sk, &fl6); if (IS_ERR(dst)) goto out; if (ipv6_addr_is_multicast(&fl6.daddr)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); msg.skb = skb; msg.offset = skb_network_offset(skb); msg.type = type; len = skb->len - msg.offset; len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); if (len < 0) { LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n"); goto out_dst_release; } rcu_read_lock(); idev = __in6_dev_get(skb->dev); err = ip6_append_data(sk, icmpv6_getfrag, &msg, len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, (struct rt6_info*)dst, MSG_DONTWAIT, np->dontfrag); if (err) { ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr)); } rcu_read_unlock(); out_dst_release: dst_release(dst); out: icmpv6_xmit_unlock(sk); } EXPORT_SYMBOL(icmpv6_send); static void icmpv6_echo_reply(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); struct sock *sk; struct inet6_dev *idev; struct ipv6_pinfo *np; const struct in6_addr *saddr = NULL; struct icmp6hdr *icmph = icmp6_hdr(skb); struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; struct dst_entry *dst; int err = 0; int hlimit; u32 mark = IP6_REPLY_MARK(net, skb->mark); saddr = &ipv6_hdr(skb)->daddr; if (!ipv6_unicast_destination(skb)) saddr = NULL; memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr)); tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; fl6.daddr = ipv6_hdr(skb)->saddr; if (saddr) fl6.saddr = *saddr; fl6.flowi6_oif = skb->dev->ifindex; fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; fl6.flowi6_mark = mark; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); sk = icmpv6_xmit_lock(net); if (sk == NULL) return; sk->sk_mark = mark; np = inet6_sk(sk); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = np->mcast_oif; else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif; err = ip6_dst_lookup(sk, &dst, &fl6); if (err) goto out; dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0); if (IS_ERR(dst)) goto out; if (ipv6_addr_is_multicast(&fl6.daddr)) hlimit = np->mcast_hops; else hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); idev = __in6_dev_get(skb->dev); msg.skb = skb; msg.offset = 0; msg.type = ICMPV6_ECHO_REPLY; err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, (struct rt6_info*)dst, MSG_DONTWAIT, np->dontfrag); if (err) { ICMP6_INC_STATS_BH(net, idev, 
ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr)); } dst_release(dst); out: icmpv6_xmit_unlock(sk); } void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) { const struct inet6_protocol *ipprot; int inner_offset; int hash; u8 nexthdr; __be16 frag_off; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) return; nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr; if (ipv6_ext_hdr(nexthdr)) { /* now skip over extension headers */ inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); if (inner_offset<0) return; } else { inner_offset = sizeof(struct ipv6hdr); } /* Checkin header including 8 bytes of inner protocol header. */ if (!pskb_may_pull(skb, inner_offset+8)) return; /* BUGGG_FUTURE: we should try to parse exthdrs in this packet. Without this we will not able f.e. to make source routed pmtu discovery. Corresponding argument (opt) to notifiers is already added. --ANK (980726) */ hash = nexthdr & (MAX_INET_PROTOS - 1); rcu_read_lock(); ipprot = rcu_dereference(inet6_protos[hash]); if (ipprot && ipprot->err_handler) ipprot->err_handler(skb, NULL, type, code, inner_offset, info); rcu_read_unlock(); raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info); } /* * Handle icmp messages */ static int icmpv6_rcv(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct inet6_dev *idev = __in6_dev_get(dev); const struct in6_addr *saddr, *daddr; const struct ipv6hdr *orig_hdr; struct icmp6hdr *hdr; u8 type; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); int nh; if (!(sp && sp->xvec[sp->len - 1]->props.flags & XFRM_STATE_ICMP)) goto drop_no_count; if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr))) goto drop_no_count; nh = skb_network_offset(skb); skb_set_network_header(skb, sizeof(*hdr)); if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb)) goto drop_no_count; skb_set_network_header(skb, nh); } ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS); saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; /* Perform checksum. */ switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, skb->csum)) break; /* fall through */ case CHECKSUM_NONE: skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, 0)); if (__skb_checksum_complete(skb)) { LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6c > %pI6c]\n", saddr, daddr); goto discard_it; } } if (!pskb_pull(skb, sizeof(*hdr))) goto discard_it; hdr = icmp6_hdr(skb); type = hdr->icmp6_type; ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type); switch (type) { case ICMPV6_ECHO_REQUEST: icmpv6_echo_reply(skb); break; case ICMPV6_ECHO_REPLY: ping_rcv(skb); break; case ICMPV6_PKT_TOOBIG: /* BUGGG_FUTURE: if packet contains rthdr, we cannot update standard destination cache. 
Seems, only "advanced" destination cache will allow to solve this problem --ANK (980726) */ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto discard_it; hdr = icmp6_hdr(skb); orig_hdr = (struct ipv6hdr *) (hdr + 1); rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev, ntohl(hdr->icmp6_mtu)); /* * Drop through to notify */ case ICMPV6_DEST_UNREACH: case ICMPV6_TIME_EXCEED: case ICMPV6_PARAMPROB: icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); break; case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: case NDISC_NEIGHBOUR_SOLICITATION: case NDISC_NEIGHBOUR_ADVERTISEMENT: case NDISC_REDIRECT: ndisc_rcv(skb); break; case ICMPV6_MGM_QUERY: igmp6_event_query(skb); break; case ICMPV6_MGM_REPORT: igmp6_event_report(skb); break; case ICMPV6_MGM_REDUCTION: case ICMPV6_NI_QUERY: case ICMPV6_NI_REPLY: case ICMPV6_MLD2_REPORT: case ICMPV6_DHAAD_REQUEST: case ICMPV6_DHAAD_REPLY: case ICMPV6_MOBILE_PREFIX_SOL: case ICMPV6_MOBILE_PREFIX_ADV: break; default: LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n"); /* informational */ if (type & ICMPV6_INFOMSG_MASK) break; /* * error of unknown type. * must pass to upper level */ icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); } kfree_skb(skb); return 0; discard_it: ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS); drop_no_count: kfree_skb(skb); return 0; } void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, u8 type, const struct in6_addr *saddr, const struct in6_addr *daddr, int oif) { memset(fl6, 0, sizeof(*fl6)); fl6->saddr = *saddr; fl6->daddr = *daddr; fl6->flowi6_proto = IPPROTO_ICMPV6; fl6->fl6_icmp_type = type; fl6->fl6_icmp_code = 0; fl6->flowi6_oif = oif; security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); } /* * Special lock-class for __icmpv6_sk: */ static struct lock_class_key icmpv6_socket_sk_dst_lock_key; static int __net_init icmpv6_sk_init(struct net *net) { struct sock *sk; int err, i, j; net->ipv6.icmp_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); if (net->ipv6.icmp_sk == NULL) return -ENOMEM; for_each_possible_cpu(i) { err = inet_ctl_sock_create(&sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { printk(KERN_ERR "Failed to initialize the ICMP6 control socket " "(err %d).\n", err); goto fail; } net->ipv6.icmp_sk[i] = sk; /* * Split off their lock-class, because sk->sk_dst_lock * gets used from softirqs, which is safe for * __icmpv6_sk (because those never get directly used * via userspace syscalls), but unsafe for normal sockets. */ lockdep_set_class(&sk->sk_dst_lock, &icmpv6_socket_sk_dst_lock_key); /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. 
*/ sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024); } return 0; fail: for (j = 0; j < i; j++) inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]); kfree(net->ipv6.icmp_sk); return err; } static void __net_exit icmpv6_sk_exit(struct net *net) { int i; for_each_possible_cpu(i) { inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]); } kfree(net->ipv6.icmp_sk); } static struct pernet_operations icmpv6_sk_ops = { .init = icmpv6_sk_init, .exit = icmpv6_sk_exit, }; int __init icmpv6_init(void) { int err; err = register_pernet_subsys(&icmpv6_sk_ops); if (err < 0) return err; err = -EAGAIN; if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) goto fail; return 0; fail: printk(KERN_ERR "Failed to register ICMP6 protocol\n"); unregister_pernet_subsys(&icmpv6_sk_ops); return err; } void icmpv6_cleanup(void) { unregister_pernet_subsys(&icmpv6_sk_ops); inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); } static const struct icmp6_err { int err; int fatal; } tab_unreach[] = { { /* NOROUTE */ .err = ENETUNREACH, .fatal = 0, }, { /* ADM_PROHIBITED */ .err = EACCES, .fatal = 1, }, { /* Was NOT_NEIGHBOUR, now reserved */ .err = EHOSTUNREACH, .fatal = 0, }, { /* ADDR_UNREACH */ .err = EHOSTUNREACH, .fatal = 0, }, { /* PORT_UNREACH */ .err = ECONNREFUSED, .fatal = 1, }, { /* POLICY_FAIL */ .err = EACCES, .fatal = 1, }, { /* REJECT_ROUTE */ .err = EACCES, .fatal = 1, }, }; int icmpv6_err_convert(u8 type, u8 code, int *err) { int fatal = 0; *err = EPROTO; switch (type) { case ICMPV6_DEST_UNREACH: fatal = 1; if (code < ARRAY_SIZE(tab_unreach)) { *err = tab_unreach[code].err; fatal = tab_unreach[code].fatal; } break; case ICMPV6_PKT_TOOBIG: *err = EMSGSIZE; break; case ICMPV6_PARAMPROB: *err = EPROTO; fatal = 1; break; case ICMPV6_TIME_EXCEED: *err = EHOSTUNREACH; break; } return fatal; } EXPORT_SYMBOL(icmpv6_err_convert); #ifdef CONFIG_SYSCTL ctl_table ipv6_icmp_table_template[] = { { .procname = "ratelimit", .data = &init_net.ipv6.sysctl.icmpv6_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, { }, }; struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) { struct ctl_table *table; table = kmemdup(ipv6_icmp_table_template, sizeof(ipv6_icmp_table_template), GFP_KERNEL); if (table) table[0].data = &net->ipv6.sysctl.icmpv6_time; return table; } #endif
gpl-2.0
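icmpv6_push_pending_frames() above finishes a message by checksumming the ICMPv6 header and payload together with the IPv6 pseudo-header via csum_ipv6_magic(). The userspace sketch below computes the same RFC 1071 sum by hand so the pseudo-header layout (source, destination, upper-layer length, zero bytes, next header) is visible; the addresses and the echo-request payload are made-up test values, not anything taken from the kernel code.

/* Userspace sketch: checksum over the IPv6 pseudo-header plus an ICMPv6
 * message -- the quantity csum_ipv6_magic() produces in the kernel. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static uint16_t csum_fold(const uint8_t *data, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (data[0] << 8) | data[1];
		data += 2;
		len -= 2;
	}
	if (len)                      /* odd trailing byte, padded with zero */
		sum += data[0] << 8;
	while (sum >> 16)             /* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	struct in6_addr saddr, daddr;
	uint8_t icmp[8] = { 128, 0, 0, 0, 0, 1, 0, 1 }; /* echo request, cksum field 0 */
	uint8_t pseudo[40 + sizeof(icmp)];
	uint32_t len = htonl(sizeof(icmp));

	inet_pton(AF_INET6, "2001:db8::1", &saddr);
	inet_pton(AF_INET6, "2001:db8::2", &daddr);

	/* Pseudo-header: src (16), dst (16), upper-layer length (4),
	 * three zero bytes, next header (58 = ICMPv6). */
	memcpy(pseudo, &saddr, 16);
	memcpy(pseudo + 16, &daddr, 16);
	memcpy(pseudo + 32, &len, 4);
	pseudo[36] = pseudo[37] = pseudo[38] = 0;
	pseudo[39] = 58;
	memcpy(pseudo + 40, icmp, sizeof(icmp));

	printf("icmp6_cksum = 0x%04x\n", csum_fold(pseudo, sizeof(pseudo), 0));
	return 0;
}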
jianC/android_kernel_htc_msm7x30
drivers/media/dvb/dvb-usb/dibusb-mb.c
761
14514
/* DVB USB compliant linux driver for mobile DVB-T USB devices based on * reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-B) * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) * * based on GPL code from DiBcom, which has * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr) * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information */ #include "dibusb.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int dib3000mb_i2c_gate_ctrl(struct dvb_frontend* fe, int enable) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dibusb_state *st = adap->priv; return st->ops.tuner_pass_ctrl(fe, enable, st->tuner_addr); } static int dibusb_dib3000mb_frontend_attach(struct dvb_usb_adapter *adap) { struct dib3000_config demod_cfg; struct dibusb_state *st = adap->priv; demod_cfg.demod_address = 0x8; if ((adap->fe = dvb_attach(dib3000mb_attach, &demod_cfg, &adap->dev->i2c_adap, &st->ops)) == NULL) return -ENODEV; adap->fe->ops.i2c_gate_ctrl = dib3000mb_i2c_gate_ctrl; return 0; } static int dibusb_thomson_tuner_attach(struct dvb_usb_adapter *adap) { struct dibusb_state *st = adap->priv; st->tuner_addr = 0x61; dvb_attach(dvb_pll_attach, adap->fe, 0x61, &adap->dev->i2c_adap, DVB_PLL_TUA6010XS); return 0; } static int dibusb_panasonic_tuner_attach(struct dvb_usb_adapter *adap) { struct dibusb_state *st = adap->priv; st->tuner_addr = 0x60; dvb_attach(dvb_pll_attach, adap->fe, 0x60, &adap->dev->i2c_adap, DVB_PLL_TDA665X); return 0; } /* Some of the Artec 1.1 device aren't equipped with the default tuner * (Thomson Cable), but with a Panasonic ENV77H11D5. This function figures * this out. */ static int dibusb_tuner_probe_and_attach(struct dvb_usb_adapter *adap) { u8 b[2] = { 0,0 }, b2[1]; int ret = 0; struct i2c_msg msg[2] = { { .flags = 0, .buf = b, .len = 2 }, { .flags = I2C_M_RD, .buf = b2, .len = 1 }, }; struct dibusb_state *st = adap->priv; /* the Panasonic sits on I2C addrass 0x60, the Thomson on 0x61 */ msg[0].addr = msg[1].addr = st->tuner_addr = 0x60; if (adap->fe->ops.i2c_gate_ctrl) adap->fe->ops.i2c_gate_ctrl(adap->fe,1); if (i2c_transfer(&adap->dev->i2c_adap, msg, 2) != 2) { err("tuner i2c write failed."); ret = -EREMOTEIO; } if (adap->fe->ops.i2c_gate_ctrl) adap->fe->ops.i2c_gate_ctrl(adap->fe,0); if (b2[0] == 0xfe) { info("This device has the Thomson Cable onboard. 
Which is default."); ret = dibusb_thomson_tuner_attach(adap); } else { info("This device has the Panasonic ENV77H11D5 onboard."); ret = dibusb_panasonic_tuner_attach(adap); } return ret; } /* USB Driver stuff */ static struct dvb_usb_device_properties dibusb1_1_properties; static struct dvb_usb_device_properties dibusb1_1_an2235_properties; static struct dvb_usb_device_properties dibusb2_0b_properties; static struct dvb_usb_device_properties artec_t1_usb2_properties; static int dibusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { if (0 == dvb_usb_device_init(intf, &dibusb1_1_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dibusb1_1_an2235_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &dibusb2_0b_properties, THIS_MODULE, NULL, adapter_nr) || 0 == dvb_usb_device_init(intf, &artec_t1_usb2_properties, THIS_MODULE, NULL, adapter_nr)) return 0; return -EINVAL; } /* do not change the order of the ID table */ static struct usb_device_id dibusb_dib3000mb_table [] = { /* 00 */ { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_AVERMEDIA_DVBT_USB_COLD) }, /* 01 */ { USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_AVERMEDIA_DVBT_USB_WARM) }, /* 02 */ { USB_DEVICE(USB_VID_COMPRO, USB_PID_COMPRO_DVBU2000_COLD) }, /* 03 */ { USB_DEVICE(USB_VID_COMPRO, USB_PID_COMPRO_DVBU2000_WARM) }, /* 04 */ { USB_DEVICE(USB_VID_COMPRO_UNK, USB_PID_COMPRO_DVBU2000_UNK_COLD) }, /* 05 */ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_MOD3000_COLD) }, /* 06 */ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_MOD3000_WARM) }, /* 07 */ { USB_DEVICE(USB_VID_EMPIA, USB_PID_KWORLD_VSTREAM_COLD) }, /* 08 */ { USB_DEVICE(USB_VID_EMPIA, USB_PID_KWORLD_VSTREAM_WARM) }, /* 09 */ { USB_DEVICE(USB_VID_GRANDTEC, USB_PID_GRANDTEC_DVBT_USB_COLD) }, /* 10 */ { USB_DEVICE(USB_VID_GRANDTEC, USB_PID_GRANDTEC_DVBT_USB_WARM) }, /* 11 */ { USB_DEVICE(USB_VID_GRANDTEC, USB_PID_DIBCOM_MOD3000_COLD) }, /* 12 */ { USB_DEVICE(USB_VID_GRANDTEC, USB_PID_DIBCOM_MOD3000_WARM) }, /* 13 */ { USB_DEVICE(USB_VID_HYPER_PALTEK, USB_PID_UNK_HYPER_PALTEK_COLD) }, /* 14 */ { USB_DEVICE(USB_VID_HYPER_PALTEK, USB_PID_UNK_HYPER_PALTEK_WARM) }, /* 15 */ { USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_TWINHAN_VP7041_COLD) }, /* 16 */ { USB_DEVICE(USB_VID_VISIONPLUS, USB_PID_TWINHAN_VP7041_WARM) }, /* 17 */ { USB_DEVICE(USB_VID_TWINHAN, USB_PID_TWINHAN_VP7041_COLD) }, /* 18 */ { USB_DEVICE(USB_VID_TWINHAN, USB_PID_TWINHAN_VP7041_WARM) }, /* 19 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_COLD) }, /* 20 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_WARM) }, /* 21 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_AN2235_COLD) }, /* 22 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_AN2235_WARM) }, /* 23 */ { USB_DEVICE(USB_VID_ADSTECH, USB_PID_ADSTECH_USB2_COLD) }, /* device ID with default DIBUSB2_0-firmware and with the hacked firmware */ /* 24 */ { USB_DEVICE(USB_VID_ADSTECH, USB_PID_ADSTECH_USB2_WARM) }, /* 25 */ { USB_DEVICE(USB_VID_KYE, USB_PID_KYE_DVB_T_COLD) }, /* 26 */ { USB_DEVICE(USB_VID_KYE, USB_PID_KYE_DVB_T_WARM) }, /* 27 */ { USB_DEVICE(USB_VID_KWORLD, USB_PID_KWORLD_VSTREAM_COLD) }, /* 28 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_USB2_COLD) }, /* 29 */ { USB_DEVICE(USB_VID_ULTIMA_ELECTRONIC, USB_PID_ULTIMA_TVBOX_USB2_WARM) }, /* * XXX: As Artec just 'forgot' to program the EEPROM on some Artec T1 devices * we don't catch these faulty IDs (namely 'Cypress FX1 USB controller') that * have been left on the device. 
If you don't have such a device but an Artec * device that's supposed to work with this driver but is not detected by it, * free to enable CONFIG_DVB_USB_DIBUSB_MB_FAULTY via your kernel config. */ #ifdef CONFIG_DVB_USB_DIBUSB_MB_FAULTY /* 30 */ { USB_DEVICE(USB_VID_ANCHOR, USB_PID_ULTIMA_TVBOX_ANCHOR_COLD) }, #endif { } /* Terminating entry */ }; MODULE_DEVICE_TABLE (usb, dibusb_dib3000mb_table); static struct dvb_usb_device_properties dibusb1_1_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_AN2135, .firmware = "dvb-usb-dibusb-5.0.0.11.fw", .num_adapters = 1, .adapter = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 16, .streaming_ctrl = dibusb_streaming_ctrl, .pid_filter = dibusb_pid_filter, .pid_filter_ctrl = dibusb_pid_filter_ctrl, .frontend_attach = dibusb_dib3000mb_frontend_attach, .tuner_attach = dibusb_tuner_probe_and_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x02, .u = { .bulk = { .buffersize = 4096, } } }, .size_of_priv = sizeof(struct dibusb_state), } }, .power_ctrl = dibusb_power_ctrl, .rc_interval = DEFAULT_RC_INTERVAL, .rc_key_map = ir_codes_dibusb_table, .rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */ .rc_query = dibusb_rc_query, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 9, .devices = { { "AVerMedia AverTV DVBT USB1.1", { &dibusb_dib3000mb_table[0], NULL }, { &dibusb_dib3000mb_table[1], NULL }, }, { "Compro Videomate DVB-U2000 - DVB-T USB1.1 (please confirm to linux-dvb)", { &dibusb_dib3000mb_table[2], &dibusb_dib3000mb_table[4], NULL}, { &dibusb_dib3000mb_table[3], NULL }, }, { "DiBcom USB1.1 DVB-T reference design (MOD3000)", { &dibusb_dib3000mb_table[5], NULL }, { &dibusb_dib3000mb_table[6], NULL }, }, { "KWorld V-Stream XPERT DTV - DVB-T USB1.1", { &dibusb_dib3000mb_table[7], NULL }, { &dibusb_dib3000mb_table[8], NULL }, }, { "Grandtec USB1.1 DVB-T", { &dibusb_dib3000mb_table[9], &dibusb_dib3000mb_table[11], NULL }, { &dibusb_dib3000mb_table[10], &dibusb_dib3000mb_table[12], NULL }, }, { "Unknown USB1.1 DVB-T device ???? 
please report the name to the author", { &dibusb_dib3000mb_table[13], NULL }, { &dibusb_dib3000mb_table[14], NULL }, }, { "TwinhanDTV USB-Ter USB1.1 / Magic Box I / HAMA USB1.1 DVB-T device", { &dibusb_dib3000mb_table[15], &dibusb_dib3000mb_table[17], NULL}, { &dibusb_dib3000mb_table[16], &dibusb_dib3000mb_table[18], NULL}, }, { "Artec T1 USB1.1 TVBOX with AN2135", { &dibusb_dib3000mb_table[19], NULL }, { &dibusb_dib3000mb_table[20], NULL }, }, { "VideoWalker DVB-T USB", { &dibusb_dib3000mb_table[25], NULL }, { &dibusb_dib3000mb_table[26], NULL }, }, } }; static struct dvb_usb_device_properties dibusb1_1_an2235_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_AN2235, .firmware = "dvb-usb-dibusb-an2235-01.fw", .num_adapters = 1, .adapter = { { .caps = DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF | DVB_USB_ADAP_HAS_PID_FILTER, .pid_filter_count = 16, .streaming_ctrl = dibusb_streaming_ctrl, .pid_filter = dibusb_pid_filter, .pid_filter_ctrl = dibusb_pid_filter_ctrl, .frontend_attach = dibusb_dib3000mb_frontend_attach, .tuner_attach = dibusb_tuner_probe_and_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x02, .u = { .bulk = { .buffersize = 4096, } } }, .size_of_priv = sizeof(struct dibusb_state), }, }, .power_ctrl = dibusb_power_ctrl, .rc_interval = DEFAULT_RC_INTERVAL, .rc_key_map = ir_codes_dibusb_table, .rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */ .rc_query = dibusb_rc_query, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, #ifdef CONFIG_DVB_USB_DIBUSB_MB_FAULTY .num_device_descs = 2, #else .num_device_descs = 1, #endif .devices = { { "Artec T1 USB1.1 TVBOX with AN2235", { &dibusb_dib3000mb_table[21], NULL }, { &dibusb_dib3000mb_table[22], NULL }, }, #ifdef CONFIG_DVB_USB_DIBUSB_MB_FAULTY { "Artec T1 USB1.1 TVBOX with AN2235 (faulty USB IDs)", { &dibusb_dib3000mb_table[30], NULL }, { NULL }, }, { NULL }, #endif } }; static struct dvb_usb_device_properties dibusb2_0b_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-adstech-usb2-02.fw", .num_adapters = 1, .adapter = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 16, .streaming_ctrl = dibusb2_0_streaming_ctrl, .pid_filter = dibusb_pid_filter, .pid_filter_ctrl = dibusb_pid_filter_ctrl, .frontend_attach = dibusb_dib3000mb_frontend_attach, .tuner_attach = dibusb_thomson_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x06, .u = { .bulk = { .buffersize = 4096, } } }, .size_of_priv = sizeof(struct dibusb_state), } }, .power_ctrl = dibusb2_0_power_ctrl, .rc_interval = DEFAULT_RC_INTERVAL, .rc_key_map = ir_codes_dibusb_table, .rc_key_map_size = 111, /* wow, that is ugly ... 
I want to load it to the driver dynamically */ .rc_query = dibusb_rc_query, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 2, .devices = { { "KWorld/ADSTech Instant DVB-T USB2.0", { &dibusb_dib3000mb_table[23], NULL }, { &dibusb_dib3000mb_table[24], NULL }, }, { "KWorld Xpert DVB-T USB2.0", { &dibusb_dib3000mb_table[27], NULL }, { NULL } }, { NULL }, } }; static struct dvb_usb_device_properties artec_t1_usb2_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-dibusb-6.0.0.8.fw", .num_adapters = 1, .adapter = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 16, .streaming_ctrl = dibusb2_0_streaming_ctrl, .pid_filter = dibusb_pid_filter, .pid_filter_ctrl = dibusb_pid_filter_ctrl, .frontend_attach = dibusb_dib3000mb_frontend_attach, .tuner_attach = dibusb_tuner_probe_and_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x06, .u = { .bulk = { .buffersize = 4096, } } }, .size_of_priv = sizeof(struct dibusb_state), } }, .power_ctrl = dibusb2_0_power_ctrl, .rc_interval = DEFAULT_RC_INTERVAL, .rc_key_map = ir_codes_dibusb_table, .rc_key_map_size = 111, /* wow, that is ugly ... I want to load it to the driver dynamically */ .rc_query = dibusb_rc_query, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "Artec T1 USB2.0", { &dibusb_dib3000mb_table[28], NULL }, { &dibusb_dib3000mb_table[29], NULL }, }, { NULL }, } }; static struct usb_driver dibusb_driver = { .name = "dvb_usb_dibusb_mb", .probe = dibusb_probe, .disconnect = dvb_usb_device_exit, .id_table = dibusb_dib3000mb_table, }; /* module stuff */ static int __init dibusb_module_init(void) { int result; if ((result = usb_register(&dibusb_driver))) { err("usb_register failed. Error number %d",result); return result; } return 0; } static void __exit dibusb_module_exit(void) { /* deregister this driver from the USB subsystem */ usb_deregister(&dibusb_driver); } module_init (dibusb_module_init); module_exit (dibusb_module_exit); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>"); MODULE_DESCRIPTION("Driver for DiBcom USB DVB-T devices (DiB3000M-B based)"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
gpl-2.0
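The dibusb device table above pairs a "cold" (pre-firmware) and a "warm" (post-firmware) USB ID for each supported product, and probe walks several property sets against that table. The standalone sketch below shows the same lookup idea in plain C; the vendor/product numbers and the lookup() helper are illustrative assumptions, not values or functions from the driver.

/* Standalone sketch of the cold/warm device-table lookup pattern. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct usb_id { uint16_t vid, pid; const char *state; };

static const struct usb_id table[] = {
	{ 0x10b8, 0x0bb8, "cold" },   /* pre-firmware ID -- made up value  */
	{ 0x10b8, 0x0bb9, "warm" },   /* post-firmware ID -- made up value */
};

static const struct usb_id *lookup(uint16_t vid, uint16_t pid)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].vid == vid && table[i].pid == pid)
			return &table[i];
	return NULL;                  /* not handled by this driver */
}

int main(void)
{
	const struct usb_id *id = lookup(0x10b8, 0x0bb9);
	printf("%s\n", id ? id->state : "unknown");
	return 0;
}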
hubcapsc/orangefs_kmod
net/netfilter/ipset/ip_set_hash_ipportip.c
761
11462
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Kernel module implementing an IP set type: the hash:ip,port,ip type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <net/tcp.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_hash.h> #define IPSET_TYPE_REV_MIN 0 /* 1 SCTP and UDPLITE support added */ /* 2 Counters support added */ /* 3 Comments support added */ /* 4 Forceadd support added */ #define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); IP_SET_MODULE_DESC("hash:ip,port,ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_hash:ip,port,ip"); /* Type specific function prefix */ #define HTYPE hash_ipportip /* IPv4 variant */ /* Member elements */ struct hash_ipportip4_elem { __be32 ip; __be32 ip2; __be16 port; u8 proto; u8 padding; }; static inline bool hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1, const struct hash_ipportip4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->ip2 == ip2->ip2 && ip1->port == ip2->port && ip1->proto == ip2->proto; } static bool hash_ipportip4_data_list(struct sk_buff *skb, const struct hash_ipportip4_elem *data) { if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) goto nla_put_failure; return 0; nla_put_failure: return 1; } static inline void hash_ipportip4_data_next(struct hash_ipportip4_elem *next, const struct hash_ipportip4_elem *d) { next->ip = d->ip; next->port = d->port; } /* Common functions */ #define MTYPE hash_ipportip4 #define PF 4 #define HOST_MASK 32 #include "ip_set_hash_gen.h" static int hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportip4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { const struct hash_ipportip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportip4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip, ip_to = 0, p = 0, port, port_to; bool with_ports = false; int ret; if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || 
!ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) || ip_set_get_extensions(set, tb, &ext); if (ret) return ret; ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &e.ip2); if (ret) return ret; if (tb[IPSET_ATTR_PORT]) e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); else return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else return -IPSET_ERR_MISSING_PROTO; if (!(with_ports || e.proto == IPPROTO_ICMP)) e.port = 0; if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_PORT_TO])) { ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_eexist(ret, flags) ? 0 : ret; } ip_to = ip = ntohl(e.ip); if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip > ip_to) swap(ip, ip_to); } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!cidr || cidr > 32) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); } port_to = port = ntohs(e.port); if (with_ports && tb[IPSET_ATTR_PORT_TO]) { port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port > port_to) swap(port, port_to); } if (retried) ip = ntohl(h->next.ip); for (; !before(ip_to, ip); ip++) { p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) : port; for (; p <= port_to; p++) { e.ip = htonl(ip); e.port = htons(p); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; else ret = 0; } } return ret; } /* IPv6 variant */ struct hash_ipportip6_elem { union nf_inet_addr ip; union nf_inet_addr ip2; __be16 port; u8 proto; u8 padding; }; /* Common functions */ static inline bool hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1, const struct hash_ipportip6_elem *ip2, u32 *multi) { return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && ipv6_addr_equal(&ip1->ip2.in6, &ip2->ip2.in6) && ip1->port == ip2->port && ip1->proto == ip2->proto; } static bool hash_ipportip6_data_list(struct sk_buff *skb, const struct hash_ipportip6_elem *data) { if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto)) goto nla_put_failure; return 0; nla_put_failure: return 1; } static inline void hash_ipportip6_data_next(struct hash_ipportip4_elem *next, const struct hash_ipportip6_elem *d) { next->port = d->port; } #undef MTYPE #undef PF #undef HOST_MASK #define MTYPE hash_ipportip6 #define PF 6 #define HOST_MASK 128 #define IP_SET_EMIT_CREATE #include "ip_set_hash_gen.h" static int hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6); return adtfn(set, &e, &ext, &opt->ext, 
opt->cmdflags); } static int hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { const struct hash_ipportip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to; bool with_ports = false; int ret; if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) || !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) || tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) || ip_set_get_extensions(set, tb, &ext); if (ret) return ret; ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2); if (ret) return ret; if (tb[IPSET_ATTR_PORT]) e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); else return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else return -IPSET_ERR_MISSING_PROTO; if (!(with_ports || e.proto == IPPROTO_ICMPV6)) e.port = 0; if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_eexist(ret, flags) ? 0 : ret; } port = ntohs(e.port); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port > port_to) swap(port, port_to); if (retried) port = ntohs(h->next.port); for (; port <= port_to; port++) { e.port = htons(port); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; else ret = 0; } return ret; } static struct ip_set_type hash_ipportip_type __read_mostly = { .name = "hash:ip,port,ip", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, .dimension = IPSET_DIM_THREE, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create = hash_ipportip_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_PROBES] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IP2] = { .type = NLA_NESTED }, [IPSET_ATTR_PORT] = { .type = NLA_U16 }, [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init hash_ipportip_init(void) { return ip_set_type_register(&hash_ipportip_type); } static void __exit hash_ipportip_fini(void) { 
ip_set_type_unregister(&hash_ipportip_type); } module_init(hash_ipportip_init); module_exit(hash_ipportip_fini);
gpl-2.0
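Each hash:ip,port,ip entry above is a fixed-size element (ip, ip2, port, proto plus an explicitly zeroed padding byte) that the set hashes into a bucket. The sketch below mirrors that element layout and reduces it to a bucket index; FNV-1a merely stands in for the kernel's jhash, and the addresses, port, and table size are made-up example values.

/* Sketch: reducing an (ip, port, ip2, proto) element to a hash bucket. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct elem {                     /* mirrors hash_ipportip4_elem above */
	uint32_t ip;              /* network byte order */
	uint32_t ip2;
	uint16_t port;
	uint8_t  proto;
	uint8_t  padding;         /* kept zero so it hashes predictably */
};

static uint32_t fnv1a(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t h = 2166136261u;
	while (len--) {
		h ^= *p++;
		h *= 16777619u;
	}
	return h;
}

int main(void)
{
	struct elem e = { .padding = 0 };   /* remaining fields zeroed too */
	unsigned int hsize = 1024;          /* example table size */

	inet_pton(AF_INET, "192.0.2.1", &e.ip);
	inet_pton(AF_INET, "198.51.100.7", &e.ip2);
	e.port = htons(443);
	e.proto = 6;                        /* TCP */

	printf("bucket = %u\n", fnv1a(&e, sizeof(e)) % hsize);
	return 0;
}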
jmztaylor/android_kernel_htc_express
drivers/pcmcia/rsrc_nonstatic.c
761
30458
/* * rsrc_nonstatic.c -- Resource management routines for !SS_CAP_STATIC_MAP sockets * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/timer.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/io.h> #include <asm/irq.h> #include <pcmcia/cs_types.h> #include <pcmcia/ss.h> #include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include "cs_internal.h" /* moved to rsrc_mgr.c MODULE_AUTHOR("David A. Hinds, Dominik Brodowski"); MODULE_LICENSE("GPL"); */ /* Parameters that can be set with 'insmod' */ #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444) INT_MODULE_PARM(probe_mem, 1); /* memory probe? */ #ifdef CONFIG_PCMCIA_PROBE INT_MODULE_PARM(probe_io, 1); /* IO port probe? */ INT_MODULE_PARM(mem_limit, 0x10000); #endif /* for io_db and mem_db */ struct resource_map { u_long base, num; struct resource_map *next; }; struct socket_data { struct resource_map mem_db; struct resource_map mem_db_valid; struct resource_map io_db; }; #define MEM_PROBE_LOW (1 << 0) #define MEM_PROBE_HIGH (1 << 1) /*====================================================================== Linux resource management extensions ======================================================================*/ static struct resource * claim_region(struct pcmcia_socket *s, resource_size_t base, resource_size_t size, int type, char *name) { struct resource *res, *parent; parent = type & IORESOURCE_MEM ? &iomem_resource : &ioport_resource; res = pcmcia_make_resource(base, size, type | IORESOURCE_BUSY, name); if (res) { #ifdef CONFIG_PCI if (s && s->cb_dev) parent = pci_find_parent_resource(s->cb_dev, res); #endif if (!parent || request_resource(parent, res)) { kfree(res); res = NULL; } } return res; } static void free_region(struct resource *res) { if (res) { release_resource(res); kfree(res); } } /*====================================================================== These manage the internal databases of available resources. 
======================================================================*/ static int add_interval(struct resource_map *map, u_long base, u_long num) { struct resource_map *p, *q; for (p = map; ; p = p->next) { if ((p != map) && (p->base+p->num >= base)) { p->num = max(num + base - p->base, p->num); return 0; } if ((p->next == map) || (p->next->base > base+num-1)) break; } q = kmalloc(sizeof(struct resource_map), GFP_KERNEL); if (!q) { printk(KERN_WARNING "out of memory to update resources\n"); return -ENOMEM; } q->base = base; q->num = num; q->next = p->next; p->next = q; return 0; } /*====================================================================*/ static int sub_interval(struct resource_map *map, u_long base, u_long num) { struct resource_map *p, *q; for (p = map; ; p = q) { q = p->next; if (q == map) break; if ((q->base+q->num > base) && (base+num > q->base)) { if (q->base >= base) { if (q->base+q->num <= base+num) { /* Delete whole block */ p->next = q->next; kfree(q); /* don't advance the pointer yet */ q = p; } else { /* Cut off bit from the front */ q->num = q->base + q->num - base - num; q->base = base + num; } } else if (q->base+q->num <= base+num) { /* Cut off bit from the end */ q->num = base - q->base; } else { /* Split the block into two pieces */ p = kmalloc(sizeof(struct resource_map), GFP_KERNEL); if (!p) { printk(KERN_WARNING "out of memory to update resources\n"); return -ENOMEM; } p->base = base+num; p->num = q->base+q->num - p->base; q->num = base - q->base; p->next = q->next ; q->next = p; } } } return 0; } /*====================================================================== These routines examine a region of IO or memory addresses to determine what ranges might be genuinely available. ======================================================================*/ #ifdef CONFIG_PCMCIA_PROBE static void do_io_probe(struct pcmcia_socket *s, unsigned int base, unsigned int num) { struct resource *res; struct socket_data *s_data = s->resource_data; unsigned int i, j, bad; int any; u_char *b, hole, most; dev_printk(KERN_INFO, &s->dev, "cs: IO port probe %#x-%#x:", base, base+num-1); /* First, what does a floating port look like? */ b = kzalloc(256, GFP_KERNEL); if (!b) { printk("\n"); dev_printk(KERN_ERR, &s->dev, "do_io_probe: unable to kmalloc 256 bytes"); return; } for (i = base, most = 0; i < base+num; i += 8) { res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); if (!res) continue; hole = inb(i); for (j = 1; j < 8; j++) if (inb(i+j) != hole) break; free_region(res); if ((j == 8) && (++b[hole] > b[most])) most = hole; if (b[most] == 127) break; } kfree(b); bad = any = 0; for (i = base; i < base+num; i += 8) { res = claim_region(s, i, 8, IORESOURCE_IO, "PCMCIA ioprobe"); if (!res) { if (!any) printk(" excluding"); if (!bad) bad = any = i; continue; } for (j = 0; j < 8; j++) if (inb(i+j) != most) break; free_region(res); if (j < 8) { if (!any) printk(" excluding"); if (!bad) bad = any = i; } else { if (bad) { sub_interval(&s_data->io_db, bad, i-bad); printk(" %#x-%#x", bad, i-1); bad = 0; } } } if (bad) { if ((num > 16) && (bad == base) && (i == base+num)) { sub_interval(&s_data->io_db, bad, i-bad); printk(" nothing: probe failed.\n"); return; } else { sub_interval(&s_data->io_db, bad, i-bad); printk(" %#x-%#x", bad, i-1); } } printk(any ? 
"\n" : " clean.\n"); } #endif /*======================================================================*/ /** * readable() - iomem validation function for cards with a valid CIS */ static int readable(struct pcmcia_socket *s, struct resource *res, unsigned int *count) { int ret = -EINVAL; if (s->fake_cis) { dev_dbg(&s->dev, "fake CIS is being used: can't validate mem\n"); return 0; } s->cis_mem.res = res; s->cis_virt = ioremap(res->start, s->map_size); if (s->cis_virt) { mutex_unlock(&s->ops_mutex); /* as we're only called from pcmcia.c, we're safe */ if (s->callback->validate) ret = s->callback->validate(s, count); /* invalidate mapping */ mutex_lock(&s->ops_mutex); iounmap(s->cis_virt); s->cis_virt = NULL; } s->cis_mem.res = NULL; if ((ret) || (*count == 0)) return -EINVAL; return 0; } /** * checksum() - iomem validation function for simple memory cards */ static int checksum(struct pcmcia_socket *s, struct resource *res, unsigned int *value) { pccard_mem_map map; int i, a = 0, b = -1, d; void __iomem *virt; virt = ioremap(res->start, s->map_size); if (virt) { map.map = 0; map.flags = MAP_ACTIVE; map.speed = 0; map.res = res; map.card_start = 0; s->ops->set_mem_map(s, &map); /* Don't bother checking every word... */ for (i = 0; i < s->map_size; i += 44) { d = readl(virt+i); a += d; b &= d; } map.flags = 0; s->ops->set_mem_map(s, &map); iounmap(virt); } if (b == -1) return -EINVAL; *value = a; return 0; } /** * do_validate_mem() - low level validate a memory region for PCMCIA use * @s: PCMCIA socket to validate * @base: start address of resource to check * @size: size of resource to check * @validate: validation function to use * * do_validate_mem() splits up the memory region which is to be checked * into two parts. Both are passed to the @validate() function. If * @validate() returns non-zero, or the value parameter to @validate() * is zero, or the value parameter is different between both calls, * the check fails, and -EINVAL is returned. Else, 0 is returned. */ static int do_validate_mem(struct pcmcia_socket *s, unsigned long base, unsigned long size, int validate (struct pcmcia_socket *s, struct resource *res, unsigned int *value)) { struct socket_data *s_data = s->resource_data; struct resource *res1, *res2; unsigned int info1 = 1, info2 = 1; int ret = -EINVAL; res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe"); res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM, "PCMCIA memprobe"); if (res1 && res2) { ret = 0; if (validate) { ret = validate(s, res1, &info1); ret += validate(s, res2, &info2); } } free_region(res2); free_region(res1); dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %p %p %u %u %u", base, base+size-1, res1, res2, ret, info1, info2); if ((ret) || (info1 != info2) || (info1 == 0)) return -EINVAL; if (validate && !s->fake_cis) { /* move it to the validated data set */ add_interval(&s_data->mem_db_valid, base, size); sub_interval(&s_data->mem_db, base, size); } return 0; } /** * do_mem_probe() - validate a memory region for PCMCIA use * @s: PCMCIA socket to validate * @base: start address of resource to check * @num: size of resource to check * @validate: validation function to use * @fallback: validation function to use if validate fails * * do_mem_probe() checks a memory region for use by the PCMCIA subsystem. * To do so, the area is split up into sensible parts, and then passed * into the @validate() function. Only if @validate() and @fallback() fail, * the area is marked as unavaibale for use by the PCMCIA subsystem. 
The * function returns the size of the usable memory area. */ static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num, int validate (struct pcmcia_socket *s, struct resource *res, unsigned int *value), int fallback (struct pcmcia_socket *s, struct resource *res, unsigned int *value)) { struct socket_data *s_data = s->resource_data; u_long i, j, bad, fail, step; dev_printk(KERN_INFO, &s->dev, "cs: memory probe 0x%06lx-0x%06lx:", base, base+num-1); bad = fail = 0; step = (num < 0x20000) ? 0x2000 : ((num>>4) & ~0x1fff); /* don't allow too large steps */ if (step > 0x800000) step = 0x800000; /* cis_readable wants to map 2x map_size */ if (step < 2 * s->map_size) step = 2 * s->map_size; for (i = j = base; i < base+num; i = j + step) { if (!fail) { for (j = i; j < base+num; j += step) { if (!do_validate_mem(s, j, step, validate)) break; } fail = ((i == base) && (j == base+num)); } if ((fail) && (fallback)) { for (j = i; j < base+num; j += step) if (!do_validate_mem(s, j, step, fallback)) break; } if (i != j) { if (!bad) printk(" excluding"); printk(" %#05lx-%#05lx", i, j-1); sub_interval(&s_data->mem_db, i, j-i); bad += j-i; } } printk(bad ? "\n" : " clean.\n"); return num - bad; } #ifdef CONFIG_PCMCIA_PROBE /** * inv_probe() - top-to-bottom search for one usuable high memory area * @s: PCMCIA socket to validate * @m: resource_map to check */ static u_long inv_probe(struct resource_map *m, struct pcmcia_socket *s) { struct socket_data *s_data = s->resource_data; u_long ok; if (m == &s_data->mem_db) return 0; ok = inv_probe(m->next, s); if (ok) { if (m->base >= 0x100000) sub_interval(&s_data->mem_db, m->base, m->num); return ok; } if (m->base < 0x100000) return 0; return do_mem_probe(s, m->base, m->num, readable, checksum); } /** * validate_mem() - memory probe function * @s: PCMCIA socket to validate * @probe_mask: MEM_PROBE_LOW | MEM_PROBE_HIGH * * The memory probe. If the memory list includes a 64K-aligned block * below 1MB, we probe in 64K chunks, and as soon as we accumulate at * least mem_limit free space, we quit. Returns 0 on usuable ports. */ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask) { struct resource_map *m, mm; static unsigned char order[] = { 0xd0, 0xe0, 0xc0, 0xf0 }; unsigned long b, i, ok = 0; struct socket_data *s_data = s->resource_data; /* We do up to four passes through the list */ if (probe_mask & MEM_PROBE_HIGH) { if (inv_probe(s_data->mem_db.next, s) > 0) return 0; if (s_data->mem_db_valid.next != &s_data->mem_db_valid) return 0; dev_printk(KERN_NOTICE, &s->dev, "cs: warning: no high memory space available!\n"); return -ENODEV; } for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) { mm = *m; /* Only probe < 1 MB */ if (mm.base >= 0x100000) continue; if ((mm.base | mm.num) & 0xffff) { ok += do_mem_probe(s, mm.base, mm.num, readable, checksum); continue; } /* Special probe for 64K-aligned block */ for (i = 0; i < 4; i++) { b = order[i] << 12; if ((b >= mm.base) && (b+0x10000 <= mm.base+mm.num)) { if (ok >= mem_limit) sub_interval(&s_data->mem_db, b, 0x10000); else ok += do_mem_probe(s, b, 0x10000, readable, checksum); } } } if (ok > 0) return 0; return -ENODEV; } #else /* CONFIG_PCMCIA_PROBE */ /** * validate_mem() - memory probe function * @s: PCMCIA socket to validate * @probe_mask: ignored * * Returns 0 on usuable ports. 
*/ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask) { struct resource_map *m, mm; struct socket_data *s_data = s->resource_data; unsigned long ok = 0; for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) { mm = *m; ok += do_mem_probe(s, mm.base, mm.num, readable, checksum); } if (ok > 0) return 0; return -ENODEV; } #endif /* CONFIG_PCMCIA_PROBE */ /** * pcmcia_nonstatic_validate_mem() - try to validate iomem for PCMCIA use * @s: PCMCIA socket to validate * * This is tricky... when we set up CIS memory, we try to validate * the memory window space allocations. * * Locking note: Must be called with skt_mutex held! */ static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s) { struct socket_data *s_data = s->resource_data; unsigned int probe_mask = MEM_PROBE_LOW; int ret; if (!probe_mem || !(s->state & SOCKET_PRESENT)) return 0; if (s->features & SS_CAP_PAGE_REGS) probe_mask = MEM_PROBE_HIGH; ret = validate_mem(s, probe_mask); if (s_data->mem_db_valid.next != &s_data->mem_db_valid) return 0; return ret; } struct pcmcia_align_data { unsigned long mask; unsigned long offset; struct resource_map *map; }; static resource_size_t pcmcia_common_align(struct pcmcia_align_data *align_data, resource_size_t start) { resource_size_t ret; /* * Ensure that we have the correct start address */ ret = (start & ~align_data->mask) + align_data->offset; if (ret < start) ret += align_data->mask + 1; return ret; } static resource_size_t pcmcia_align(void *align_data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pcmcia_align_data *data = align_data; struct resource_map *m; resource_size_t start; start = pcmcia_common_align(data, res->start); for (m = data->map->next; m != data->map; m = m->next) { unsigned long map_start = m->base; unsigned long map_end = m->base + m->num - 1; /* * If the lower resources are not available, try aligning * to this entry of the resource database to see if it'll * fit here. */ if (start < map_start) start = pcmcia_common_align(data, map_start); /* * If we're above the area which was passed in, there's * no point proceeding. */ if (start >= res->end) break; if ((start + size - 1) <= map_end) break; } /* * If we failed to find something suitable, ensure we fail. */ if (m == data->map) start = res->end; return start; } /* * Adjust an existing IO region allocation, but making sure that we don't * encroach outside the resources which the user supplied. */ static int __nonstatic_adjust_io_region(struct pcmcia_socket *s, unsigned long r_start, unsigned long r_end) { struct resource_map *m; struct socket_data *s_data = s->resource_data; int ret = -ENOMEM; for (m = s_data->io_db.next; m != &s_data->io_db; m = m->next) { unsigned long start = m->base; unsigned long end = m->base + m->num - 1; if (start > r_start || r_end > end) continue; ret = 0; } return ret; } /*====================================================================== These find ranges of I/O ports or memory addresses that are not currently allocated by other devices. The 'align' field should reflect the number of bits of address that need to be preserved from the initial value of *base. It should be a power of two, greater than or equal to 'num'. A value of 0 means that all bits of *base are significant. *base should also be strictly less than 'align'. 
======================================================================*/ static struct resource *__nonstatic_find_io_region(struct pcmcia_socket *s, unsigned long base, int num, unsigned long align) { struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO, dev_name(&s->dev)); struct socket_data *s_data = s->resource_data; struct pcmcia_align_data data; unsigned long min = base; int ret; data.mask = align - 1; data.offset = base & data.mask; data.map = &s_data->io_db; #ifdef CONFIG_PCI if (s->cb_dev) { ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, min, 0, pcmcia_align, &data); } else #endif ret = allocate_resource(&ioport_resource, res, num, min, ~0UL, 1, pcmcia_align, &data); if (ret != 0) { kfree(res); res = NULL; } return res; } static int nonstatic_find_io(struct pcmcia_socket *s, unsigned int attr, unsigned int *base, unsigned int num, unsigned int align) { int i, ret = 0; /* Check for an already-allocated window that must conflict with * what was asked for. It is a hack because it does not catch all * potential conflicts, just the most obvious ones. */ for (i = 0; i < MAX_IO_WIN; i++) { if (!s->io[i].res) continue; if (!*base) continue; if ((s->io[i].res->start & (align-1)) == *base) return -EBUSY; } for (i = 0; i < MAX_IO_WIN; i++) { struct resource *res = s->io[i].res; unsigned int try; if (res && (res->flags & IORESOURCE_BITS) != (attr & IORESOURCE_BITS)) continue; if (!res) { if (align == 0) align = 0x10000; res = s->io[i].res = __nonstatic_find_io_region(s, *base, num, align); if (!res) return -EINVAL; *base = res->start; s->io[i].res->flags = ((res->flags & ~IORESOURCE_BITS) | (attr & IORESOURCE_BITS)); s->io[i].InUse = num; return 0; } /* Try to extend top of window */ try = res->end + 1; if ((*base == 0) || (*base == try)) { ret = __nonstatic_adjust_io_region(s, res->start, res->end + num); if (!ret) { ret = adjust_resource(s->io[i].res, res->start, res->end - res->start + num + 1); if (ret) continue; *base = try; s->io[i].InUse += num; return 0; } } /* Try to extend bottom of window */ try = res->start - num; if ((*base == 0) || (*base == try)) { ret = __nonstatic_adjust_io_region(s, res->start - num, res->end); if (!ret) { ret = adjust_resource(s->io[i].res, res->start - num, res->end - res->start + num + 1); if (ret) continue; *base = try; s->io[i].InUse += num; return 0; } } } return -EINVAL; } static struct resource *nonstatic_find_mem_region(u_long base, u_long num, u_long align, int low, struct pcmcia_socket *s) { struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_MEM, dev_name(&s->dev)); struct socket_data *s_data = s->resource_data; struct pcmcia_align_data data; unsigned long min, max; int ret, i, j; low = low || !(s->features & SS_CAP_PAGE_REGS); data.mask = align - 1; data.offset = base & data.mask; for (i = 0; i < 2; i++) { data.map = &s_data->mem_db_valid; if (low) { max = 0x100000UL; min = base < max ? 
base : 0; } else { max = ~0UL; min = 0x100000UL + base; } for (j = 0; j < 2; j++) { #ifdef CONFIG_PCI if (s->cb_dev) { ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1, min, 0, pcmcia_align, &data); } else #endif { ret = allocate_resource(&iomem_resource, res, num, min, max, 1, pcmcia_align, &data); } if (ret == 0) break; data.map = &s_data->mem_db; } if (ret == 0 || low) break; low = 1; } if (ret != 0) { kfree(res); res = NULL; } return res; } static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) { struct socket_data *data = s->resource_data; unsigned long size = end - start + 1; int ret = 0; if (end < start) return -EINVAL; switch (action) { case ADD_MANAGED_RESOURCE: ret = add_interval(&data->mem_db, start, size); if (!ret) do_mem_probe(s, start, size, NULL, NULL); break; case REMOVE_MANAGED_RESOURCE: ret = sub_interval(&data->mem_db, start, size); break; default: ret = -EINVAL; } return ret; } static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long start, unsigned long end) { struct socket_data *data = s->resource_data; unsigned long size; int ret = 0; #if defined(CONFIG_X86) /* on x86, avoid anything < 0x100 for it is often used for * legacy platform devices */ if (start < 0x100) start = 0x100; #endif size = end - start + 1; if (end < start) return -EINVAL; if (end > IO_SPACE_LIMIT) return -EINVAL; switch (action) { case ADD_MANAGED_RESOURCE: if (add_interval(&data->io_db, start, size) != 0) { ret = -EBUSY; break; } #ifdef CONFIG_PCMCIA_PROBE if (probe_io) do_io_probe(s, start, size); #endif break; case REMOVE_MANAGED_RESOURCE: sub_interval(&data->io_db, start, size); break; default: ret = -EINVAL; break; } return ret; } #ifdef CONFIG_PCI static int nonstatic_autoadd_resources(struct pcmcia_socket *s) { struct resource *res; int i, done = 0; if (!s->cb_dev || !s->cb_dev->bus) return -ENODEV; #if defined(CONFIG_X86) /* If this is the root bus, the risk of hitting some strange * system devices is too high: If a driver isn't loaded, the * resources are not claimed; even if a driver is loaded, it * may not request all resources or even the wrong one. We * can neither trust the rest of the kernel nor ACPI/PNP and * CRS parsing to get it right. Therefore, use several * safeguards: * * - Do not auto-add resources if the CardBus bridge is on * the PCI root bus * * - Avoid any I/O ports < 0x100. * * - On PCI-PCI bridges, only use resources which are set up * exclusively for the secondary PCI bus: the risk of hitting * system devices is quite low, as they usually aren't * connected to the secondary PCI bus. 
*/ if (s->cb_dev->bus->number == 0) return -EINVAL; for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { res = s->cb_dev->bus->resource[i]; #else pci_bus_for_each_resource(s->cb_dev->bus, res, i) { #endif if (!res) continue; if (res->flags & IORESOURCE_IO) { /* safeguard against the root resource, where the * risk of hitting any other device would be too * high */ if (res == &ioport_resource) continue; dev_printk(KERN_INFO, &s->cb_dev->dev, "pcmcia: parent PCI bridge window: %pR\n", res); if (!adjust_io(s, ADD_MANAGED_RESOURCE, res->start, res->end)) done |= IORESOURCE_IO; } if (res->flags & IORESOURCE_MEM) { /* safeguard against the root resource, where the * risk of hitting any other device would be too * high */ if (res == &iomem_resource) continue; dev_printk(KERN_INFO, &s->cb_dev->dev, "pcmcia: parent PCI bridge window: %pR\n", res); if (!adjust_memory(s, ADD_MANAGED_RESOURCE, res->start, res->end)) done |= IORESOURCE_MEM; } } /* if we got at least one of IO, and one of MEM, we can be glad and * activate the PCMCIA subsystem */ if (done == (IORESOURCE_MEM | IORESOURCE_IO)) s->resource_setup_done = 1; return 0; } #else static inline int nonstatic_autoadd_resources(struct pcmcia_socket *s) { return -ENODEV; } #endif static int nonstatic_init(struct pcmcia_socket *s) { struct socket_data *data; data = kzalloc(sizeof(struct socket_data), GFP_KERNEL); if (!data) return -ENOMEM; data->mem_db.next = &data->mem_db; data->mem_db_valid.next = &data->mem_db_valid; data->io_db.next = &data->io_db; s->resource_data = (void *) data; nonstatic_autoadd_resources(s); return 0; } static void nonstatic_release_resource_db(struct pcmcia_socket *s) { struct socket_data *data = s->resource_data; struct resource_map *p, *q; for (p = data->mem_db_valid.next; p != &data->mem_db_valid; p = q) { q = p->next; kfree(p); } for (p = data->mem_db.next; p != &data->mem_db; p = q) { q = p->next; kfree(p); } for (p = data->io_db.next; p != &data->io_db; p = q) { q = p->next; kfree(p); } } struct pccard_resource_ops pccard_nonstatic_ops = { .validate_mem = pcmcia_nonstatic_validate_mem, .find_io = nonstatic_find_io, .find_mem = nonstatic_find_mem_region, .add_io = adjust_io, .add_mem = adjust_memory, .init = nonstatic_init, .exit = nonstatic_release_resource_db, }; EXPORT_SYMBOL(pccard_nonstatic_ops); /* sysfs interface to the resource database */ static ssize_t show_io_db(struct device *dev, struct device_attribute *attr, char *buf) { struct pcmcia_socket *s = dev_get_drvdata(dev); struct socket_data *data; struct resource_map *p; ssize_t ret = 0; mutex_lock(&s->ops_mutex); data = s->resource_data; for (p = data->io_db.next; p != &data->io_db; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); } mutex_unlock(&s->ops_mutex); return ret; } static ssize_t store_io_db(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pcmcia_socket *s = dev_get_drvdata(dev); unsigned long start_addr, end_addr; unsigned int add = ADD_MANAGED_RESOURCE; ssize_t ret = 0; ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr); if (ret != 2) { ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr); add = REMOVE_MANAGED_RESOURCE; if (ret != 2) { ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr, &end_addr); add = ADD_MANAGED_RESOURCE; if (ret != 2) return -EINVAL; } } if (end_addr < start_addr) return -EINVAL; mutex_lock(&s->ops_mutex); ret = adjust_io(s, add, 
			start_addr, end_addr);
	if (!ret)
		s->resource_setup_new = 1;
	mutex_unlock(&s->ops_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(available_resources_io, 0600, show_io_db, store_io_db);

static ssize_t show_mem_db(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct pcmcia_socket *s = dev_get_drvdata(dev);
	struct socket_data *data;
	struct resource_map *p;
	ssize_t ret = 0;

	mutex_lock(&s->ops_mutex);
	data = s->resource_data;

	for (p = data->mem_db_valid.next; p != &data->mem_db_valid;
	     p = p->next) {
		if (ret > (PAGE_SIZE - 10))
			continue;
		ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
				"0x%08lx - 0x%08lx\n",
				((unsigned long) p->base),
				((unsigned long) p->base + p->num - 1));
	}

	for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
		if (ret > (PAGE_SIZE - 10))
			continue;
		ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
				"0x%08lx - 0x%08lx\n",
				((unsigned long) p->base),
				((unsigned long) p->base + p->num - 1));
	}
	mutex_unlock(&s->ops_mutex);
	return ret;
}

static ssize_t store_mem_db(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pcmcia_socket *s = dev_get_drvdata(dev);
	unsigned long start_addr, end_addr;
	unsigned int add = ADD_MANAGED_RESOURCE;
	ssize_t ret = 0;

	ret = sscanf(buf, "+ 0x%lx - 0x%lx", &start_addr, &end_addr);
	if (ret != 2) {
		ret = sscanf(buf, "- 0x%lx - 0x%lx", &start_addr, &end_addr);
		add = REMOVE_MANAGED_RESOURCE;
		if (ret != 2) {
			ret = sscanf(buf, "0x%lx - 0x%lx", &start_addr,
				     &end_addr);
			add = ADD_MANAGED_RESOURCE;
			if (ret != 2)
				return -EINVAL;
		}
	}
	if (end_addr < start_addr)
		return -EINVAL;

	mutex_lock(&s->ops_mutex);
	ret = adjust_memory(s, add, start_addr, end_addr);
	if (!ret)
		s->resource_setup_new = 1;
	mutex_unlock(&s->ops_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(available_resources_mem, 0600, show_mem_db, store_mem_db);

static struct attribute *pccard_rsrc_attributes[] = {
	&dev_attr_available_resources_io.attr,
	&dev_attr_available_resources_mem.attr,
	NULL,
};

static const struct attribute_group rsrc_attributes = {
	.attrs = pccard_rsrc_attributes,
};

static int __devinit pccard_sysfs_add_rsrc(struct device *dev,
					   struct class_interface *class_intf)
{
	struct pcmcia_socket *s = dev_get_drvdata(dev);

	if (s->resource_ops != &pccard_nonstatic_ops)
		return 0;
	return sysfs_create_group(&dev->kobj, &rsrc_attributes);
}

static void __devexit pccard_sysfs_remove_rsrc(struct device *dev,
					       struct class_interface *class_intf)
{
	struct pcmcia_socket *s = dev_get_drvdata(dev);

	if (s->resource_ops != &pccard_nonstatic_ops)
		return;
	sysfs_remove_group(&dev->kobj, &rsrc_attributes);
}

static struct class_interface pccard_rsrc_interface __refdata = {
	.class = &pcmcia_socket_class,
	.add_dev = &pccard_sysfs_add_rsrc,
	.remove_dev = __devexit_p(&pccard_sysfs_remove_rsrc),
};

static int __init nonstatic_sysfs_init(void)
{
	return class_interface_register(&pccard_rsrc_interface);
}

static void __exit nonstatic_sysfs_exit(void)
{
	class_interface_unregister(&pccard_rsrc_interface);
}

module_init(nonstatic_sysfs_init);
module_exit(nonstatic_sysfs_exit);
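/*
 * Illustrative sketch, not part of the original file: the store_io_db() and
 * store_mem_db() handlers above parse one range per write in the forms
 * "+ 0xSTART - 0xEND" (add a managed range), "- 0xSTART - 0xEND" (remove it)
 * or "0xSTART - 0xEND" (add), matching the three sscanf() patterns.  A
 * minimal userspace sketch, assuming the usual sysfs location
 * /sys/class/pcmcia_socket/pcmcia_socket0/ for the first socket.
 */
#if 0	/* userspace example only, never compiled into the driver */
#include <stdio.h>

static int pcmcia_add_io_range(void)
{
	FILE *f = fopen("/sys/class/pcmcia_socket/pcmcia_socket0/"
			"available_resources_io", "w");

	if (!f)
		return -1;
	/* ask the nonstatic resource database to manage ports 0x100-0x3ff */
	fprintf(f, "+ 0x100 - 0x3ff\n");
	return fclose(f);
}
#endif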
gpl-2.0
amitbagaria/samsung-kernel-latona
drivers/staging/vt6655/rc4.c
761
2181
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * File: rc4.c
 *
 * Purpose:
 *
 * Functions:
 *
 * Revision History:
 *
 * Author: Kyle Hsu
 *
 * Date: Sep 4, 2002
 *
 */

#include "rc4.h"

void rc4_init(PRC4Ext pRC4, PBYTE pbyKey, UINT cbKey_len)
{
	UINT ust1, ust2;
	UINT keyindex;
	UINT stateindex;
	PBYTE pbyst;
	UINT idx;

	pbyst = pRC4->abystate;
	pRC4->ux = 0;
	pRC4->uy = 0;
	for (idx = 0; idx < 256; idx++)
		pbyst[idx] = (BYTE)idx;
	keyindex = 0;
	stateindex = 0;
	for (idx = 0; idx < 256; idx++) {
		ust1 = pbyst[idx];
		stateindex = (stateindex + pbyKey[keyindex] + ust1) & 0xff;
		ust2 = pbyst[stateindex];
		pbyst[stateindex] = (BYTE)ust1;
		pbyst[idx] = (BYTE)ust2;
		if (++keyindex >= cbKey_len)
			keyindex = 0;
	}
}

UINT rc4_byte(PRC4Ext pRC4)
{
	UINT ux;
	UINT uy;
	UINT ustx, usty;
	PBYTE pbyst;

	pbyst = pRC4->abystate;
	ux = (pRC4->ux + 1) & 0xff;
	ustx = pbyst[ux];
	uy = (ustx + pRC4->uy) & 0xff;
	usty = pbyst[uy];
	pRC4->ux = ux;
	pRC4->uy = uy;
	pbyst[uy] = (BYTE)ustx;
	pbyst[ux] = (BYTE)usty;
	return pbyst[(ustx + usty) & 0xff];
}

void rc4_encrypt(PRC4Ext pRC4, PBYTE pbyDest, PBYTE pbySrc, UINT cbData_len)
{
	UINT ii;

	for (ii = 0; ii < cbData_len; ii++)
		pbyDest[ii] = (BYTE)(pbySrc[ii] ^ rc4_byte(pRC4));
}
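/*
 * Illustrative sketch, not part of the original file: how the rc4_* helpers
 * above fit together, assuming rc4.h provides the RC4Ext context type this
 * driver uses.  RC4 is a symmetric stream cipher, so re-initialising the
 * context with the same key and XOR-ing again recovers the plaintext.
 */
#if 0	/* usage example only */
static void rc4_roundtrip_example(void)
{
	RC4Ext ctx;
	BYTE key[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
	BYTE msg[4] = { 'T', 'E', 'S', 'T' };
	BYTE enc[4], dec[4];

	rc4_init(&ctx, key, sizeof(key));	/* key-schedule the state */
	rc4_encrypt(&ctx, enc, msg, sizeof(msg));

	rc4_init(&ctx, key, sizeof(key));	/* restart the keystream */
	rc4_encrypt(&ctx, dec, enc, sizeof(enc));	/* dec equals msg again */
}
#endif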
gpl-2.0
t0mm13b/CAF-Zte-Blade-Android-MSM-2.6.35
drivers/scsi/fnic/fnic_main.c
761
26586
/* * Copyright 2008 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/mempool.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/if_ether.h> #include <scsi/fc/fc_fip.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_tcq.h> #include <scsi/libfc.h> #include <scsi/fc_frame.h> #include "vnic_dev.h" #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" #include "fnic.h" #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 /* Timer to poll notification area for events. Used for MSI interrupts */ #define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ) static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES]; static struct kmem_cache *fnic_io_req_cache; LIST_HEAD(fnic_list); DEFINE_SPINLOCK(fnic_list_lock); /* Supported devices by fnic module */ static struct pci_device_id fnic_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) }, { 0, } }; MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, " "Joseph R. 
Eykholt <jeykholt@cisco.com>"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, fnic_id_table); unsigned int fnic_log_level; module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); static struct libfc_function_template fnic_transport_template = { .frame_send = fnic_send, .lport_set_port_id = fnic_set_port_id, .fcp_abort_io = fnic_empty_scsi_cleanup, .fcp_cleanup = fnic_empty_scsi_cleanup, .exch_mgr_reset = fnic_exch_mgr_reset }; static int fnic_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct fc_lport *lp = shost_priv(sdev->host); struct fnic *fnic = lport_priv(lp); sdev->tagged_supported = 1; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000; return 0; } static struct scsi_host_template fnic_host_template = { .module = THIS_MODULE, .name = DRV_NAME, .queuecommand = fnic_queuecommand, .eh_abort_handler = fnic_abort_cmd, .eh_device_reset_handler = fnic_device_reset, .eh_host_reset_handler = fnic_host_reset, .slave_alloc = fnic_slave_alloc, .change_queue_depth = fc_change_queue_depth, .change_queue_type = fc_change_queue_type, .this_id = -1, .cmd_per_lun = 3, .can_queue = FNIC_MAX_IO_REQ, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = FNIC_MAX_SG_DESC_CNT, .max_sectors = 0xffff, .shost_attrs = fnic_attrs, }; static void fnic_get_host_speed(struct Scsi_Host *shost); static struct scsi_transport_template *fnic_fc_transport; static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); static struct fc_function_template fnic_fc_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_active_fc4s = 1, .show_host_maxframe_size = 1, .show_host_port_id = 1, .show_host_supported_speeds = 1, .get_host_speed = fnic_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, .get_host_port_state = fc_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_host_fabric_name = 1, .show_starget_node_name = 1, .show_starget_port_name = 1, .show_starget_port_id = 1, .show_rport_dev_loss_tmo = 1, .issue_fc_host_lip = fnic_reset, .get_fc_host_stats = fnic_get_stats, .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), .terminate_rport_io = fnic_terminate_rport_io, .bsg_request = fc_lport_bsg_request, }; static void fnic_get_host_speed(struct Scsi_Host *shost) { struct fc_lport *lp = shost_priv(shost); struct fnic *fnic = lport_priv(lp); u32 port_speed = vnic_dev_port_speed(fnic->vdev); /* Add in other values as they get defined in fw */ switch (port_speed) { case 10000: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; default: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; } } static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) { int ret; struct fc_lport *lp = shost_priv(host); struct fnic *fnic = lport_priv(lp); struct fc_host_statistics *stats = &lp->host_stats; struct vnic_stats *vs; unsigned long flags; if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) return stats; fnic->stats_time = jiffies; spin_lock_irqsave(&fnic->fnic_lock, flags); ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (ret) { FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, 
"fnic: Get vnic stats failed" " 0x%x", ret); return stats; } vs = fnic->stats; stats->tx_frames = vs->tx.tx_unicast_frames_ok; stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; stats->rx_frames = vs->rx.rx_unicast_frames_ok; stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; stats->invalid_crc_count = vs->rx.rx_crc_errors; stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ; stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); return stats; } void fnic_log_q_error(struct fnic *fnic) { unsigned int i; u32 error_status; for (i = 0; i < fnic->raw_wq_count; i++) { error_status = ioread32(&fnic->wq[i].ctrl->error_status); if (error_status) shost_printk(KERN_ERR, fnic->lport->host, "WQ[%d] error_status" " %d\n", i, error_status); } for (i = 0; i < fnic->rq_count; i++) { error_status = ioread32(&fnic->rq[i].ctrl->error_status); if (error_status) shost_printk(KERN_ERR, fnic->lport->host, "RQ[%d] error_status" " %d\n", i, error_status); } for (i = 0; i < fnic->wq_copy_count; i++) { error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status); if (error_status) shost_printk(KERN_ERR, fnic->lport->host, "CWQ[%d] error_status" " %d\n", i, error_status); } } void fnic_handle_link_event(struct fnic *fnic) { unsigned long flags; spin_lock_irqsave(&fnic->fnic_lock, flags); if (fnic->stop_rx_link_events) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); return; } spin_unlock_irqrestore(&fnic->fnic_lock, flags); queue_work(fnic_event_queue, &fnic->link_work); } static int fnic_notify_set(struct fnic *fnic) { int err; switch (vnic_dev_get_intr_mode(fnic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY); break; case VNIC_DEV_INTR_MODE_MSI: err = vnic_dev_notify_set(fnic->vdev, -1); break; case VNIC_DEV_INTR_MODE_MSIX: err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY); break; default: shost_printk(KERN_ERR, fnic->lport->host, "Interrupt mode should be set up" " before devcmd notify set %d\n", vnic_dev_get_intr_mode(fnic->vdev)); err = -1; break; } return err; } static void fnic_notify_timer(unsigned long data) { struct fnic *fnic = (struct fnic *)data; fnic_handle_link_event(fnic); mod_timer(&fnic->notify_timer, round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); } static void fnic_notify_timer_start(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { case VNIC_DEV_INTR_MODE_MSI: /* * Schedule first timeout immediately. 
The driver is * initiatialized and ready to look for link up notification */ mod_timer(&fnic->notify_timer, jiffies); break; default: /* Using intr for notification for INTx/MSI-X */ break; }; } static int fnic_dev_wait(struct vnic_dev *vdev, int (*start)(struct vnic_dev *, int), int (*finished)(struct vnic_dev *, int *), int arg) { unsigned long time; int done; int err; err = start(vdev, arg); if (err) return err; /* Wait for func to complete...2 seconds max */ time = jiffies + (HZ * 2); do { err = finished(vdev, &done); if (err) return err; if (done) return 0; schedule_timeout_uninterruptible(HZ / 10); } while (time_after(time, jiffies)); return -ETIMEDOUT; } static int fnic_cleanup(struct fnic *fnic) { unsigned int i; int err; vnic_dev_disable(fnic->vdev); for (i = 0; i < fnic->intr_count; i++) vnic_intr_mask(&fnic->intr[i]); for (i = 0; i < fnic->rq_count; i++) { err = vnic_rq_disable(&fnic->rq[i]); if (err) return err; } for (i = 0; i < fnic->raw_wq_count; i++) { err = vnic_wq_disable(&fnic->wq[i]); if (err) return err; } for (i = 0; i < fnic->wq_copy_count; i++) { err = vnic_wq_copy_disable(&fnic->wq_copy[i]); if (err) return err; } /* Clean up completed IOs and FCS frames */ fnic_wq_copy_cmpl_handler(fnic, -1); fnic_wq_cmpl_handler(fnic, -1); fnic_rq_cmpl_handler(fnic, -1); /* Clean up the IOs and FCS frames that have not completed */ for (i = 0; i < fnic->raw_wq_count; i++) vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf); for (i = 0; i < fnic->rq_count; i++) vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); for (i = 0; i < fnic->wq_copy_count; i++) vnic_wq_copy_clean(&fnic->wq_copy[i], fnic_wq_copy_cleanup_handler); for (i = 0; i < fnic->cq_count; i++) vnic_cq_clean(&fnic->cq[i]); for (i = 0; i < fnic->intr_count; i++) vnic_intr_clean(&fnic->intr[i]); mempool_destroy(fnic->io_req_pool); for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) mempool_destroy(fnic->io_sgl_pool[i]); return 0; } static void fnic_iounmap(struct fnic *fnic) { if (fnic->bar0.vaddr) iounmap(fnic->bar0.vaddr); } /* * Allocate element for mempools requiring GFP_DMA flag. * Otherwise, checks in kmem_flagcheck() hit BUG_ON(). */ static void *fnic_alloc_slab_dma(gfp_t gfp_mask, void *pool_data) { struct kmem_cache *mem = pool_data; return kmem_cache_alloc(mem, gfp_mask | GFP_ATOMIC | GFP_DMA); } /** * fnic_get_mac() - get assigned data MAC address for FIP code. * @lport: local port. 
*/ static u8 *fnic_get_mac(struct fc_lport *lport) { struct fnic *fnic = lport_priv(lport); return fnic->data_src_addr; } static int __devinit fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct Scsi_Host *host; struct fc_lport *lp; struct fnic *fnic; mempool_t *pool; int err; int i; unsigned long flags; /* * Allocate SCSI Host and set up association between host, * local port, and fnic */ lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic)); if (!lp) { printk(KERN_ERR PFX "Unable to alloc libfc local port\n"); err = -ENOMEM; goto err_out; } host = lp->host; fnic = lport_priv(lp); fnic->lport = lp; fnic->ctlr.lp = lp; snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, host->host_no); host->transportt = fnic_fc_transport; err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Unable to alloc shared tag map\n"); goto err_out_free_hba; } /* Setup PCI resources */ pci_set_drvdata(pdev, fnic); fnic->pdev = pdev; err = pci_enable_device(pdev); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Cannot enable PCI device, aborting.\n"); goto err_out_free_hba; } err = pci_request_regions(pdev, DRV_NAME); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Cannot enable PCI resources, aborting\n"); goto err_out_disable_device; } pci_set_master(pdev); /* Query PCI controller on system for DMA addressing * limitation for the device. Try 40-bit first, and * fail to 32-bit. */ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "No usable DMA configuration " "aborting\n"); goto err_out_release_regions; } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Unable to obtain 32-bit DMA " "for consistent allocations, aborting.\n"); goto err_out_release_regions; } } else { err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Unable to obtain 40-bit DMA " "for consistent allocations, aborting.\n"); goto err_out_release_regions; } } /* Map vNIC resources from BAR0 */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { shost_printk(KERN_ERR, fnic->lport->host, "BAR0 not memory-map'able, aborting.\n"); err = -ENODEV; goto err_out_release_regions; } fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); fnic->bar0.bus_addr = pci_resource_start(pdev, 0); fnic->bar0.len = pci_resource_len(pdev, 0); if (!fnic->bar0.vaddr) { shost_printk(KERN_ERR, fnic->lport->host, "Cannot memory-map BAR0 res hdr, " "aborting.\n"); err = -ENODEV; goto err_out_release_regions; } fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0); if (!fnic->vdev) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC registration failed, " "aborting.\n"); err = -ENODEV; goto err_out_iounmap; } err = fnic_dev_wait(fnic->vdev, vnic_dev_open, vnic_dev_open_done, 0); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC dev open failed, aborting.\n"); goto err_out_vnic_unregister; } err = vnic_dev_init(fnic->vdev, 0); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC dev init failed, aborting.\n"); goto err_out_dev_close; } err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC get MAC addr failed \n"); goto err_out_dev_close; } /* set data_src for point-to-point mode and to keep it non-zero */ memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, 
ETH_ALEN); /* Get vNIC configuration */ err = fnic_get_vnic_config(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Get vNIC configuration failed, " "aborting.\n"); goto err_out_dev_close; } host->max_lun = fnic->config.luns_per_tgt; host->max_id = FNIC_MAX_FCP_TARGET; host->max_cmd_len = FCOE_MAX_CMD_LEN; fnic_get_res_counts(fnic); err = fnic_set_intr_mode(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Failed to set intr mode, " "aborting.\n"); goto err_out_dev_close; } err = fnic_alloc_vnic_resources(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Failed to alloc vNIC resources, " "aborting.\n"); goto err_out_clear_intr; } /* initialize all fnic locks */ spin_lock_init(&fnic->fnic_lock); for (i = 0; i < FNIC_WQ_MAX; i++) spin_lock_init(&fnic->wq_lock[i]); for (i = 0; i < FNIC_WQ_COPY_MAX; i++) { spin_lock_init(&fnic->wq_copy_lock[i]); fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK; fnic->fw_ack_recd[i] = 0; fnic->fw_ack_index[i] = -1; } for (i = 0; i < FNIC_IO_LOCKS; i++) spin_lock_init(&fnic->io_req_lock[i]); fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); if (!fnic->io_req_pool) goto err_out_free_resources; pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); if (!pool) goto err_out_free_ioreq_pool; fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); if (!pool) goto err_out_free_dflt_pool; fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; /* setup vlan config, hw inserts vlan header */ fnic->vlan_hw_insert = 1; fnic->vlan_id = 0; /* Initialize the FIP fcoe_ctrl struct */ fnic->ctlr.send = fnic_eth_send; fnic->ctlr.update_mac = fnic_update_mac; fnic->ctlr.get_src_addr = fnic_get_mac; fcoe_ctlr_init(&fnic->ctlr); if (fnic->config.flags & VFCF_FIP_CAPABLE) { shost_printk(KERN_INFO, fnic->lport->host, "firmware supports FIP\n"); /* enable directed and multicast */ vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); } else { shost_printk(KERN_INFO, fnic->lport->host, "firmware uses non-FIP mode\n"); fnic->ctlr.mode = FIP_ST_NON_FIP; } fnic->state = FNIC_IN_FC_MODE; /* Enable hardware stripping of vlan header on ingress */ fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1); /* Setup notification buffer area */ err = fnic_notify_set(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Failed to alloc notify buffer, aborting.\n"); goto err_out_free_max_pool; } /* Setup notify timer when using MSI interrupts */ if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) setup_timer(&fnic->notify_timer, fnic_notify_timer, (unsigned long)fnic); /* allocate RQ buffers and post them to RQ*/ for (i = 0; i < fnic->rq_count; i++) { err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "fnic_alloc_rq_frame can't alloc " "frame\n"); goto err_out_free_rq_buf; } } /* * Initialization done with PCI system, hardware, firmware. 
* Add host to SCSI */ err = scsi_add_host(lp->host, &pdev->dev); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "fnic: scsi_add_host failed...exiting\n"); goto err_out_free_rq_buf; } /* Start local port initiatialization */ lp->link_up = 0; lp->tt = fnic_transport_template; lp->max_retry_count = fnic->config.flogi_retries; lp->max_rport_retry_count = fnic->config.plogi_retries; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_CONF_COMPL); if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) lp->service_params |= FCP_SPPF_RETRY; lp->boot_time = jiffies; lp->e_d_tov = fnic->config.ed_tov; lp->r_a_tov = fnic->config.ra_tov; lp->link_supported_speeds = FC_PORTSPEED_10GBIT; fc_set_wwnn(lp, fnic->config.node_wwn); fc_set_wwpn(lp, fnic->config.port_wwn); fc_lport_init(lp); fc_exch_init(lp); fc_elsct_init(lp); fc_rport_init(lp); fc_disc_init(lp); if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, FCPIO_HOST_EXCH_RANGE_END, NULL)) { err = -ENOMEM; goto err_out_remove_scsi_host; } fc_lport_init_stats(lp); fc_lport_config(lp); if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + sizeof(struct fc_frame_header))) { err = -EINVAL; goto err_out_free_exch_mgr; } fc_host_maxframe_size(lp->host) = lp->mfs; sprintf(fc_host_symbolic_name(lp->host), DRV_NAME " v" DRV_VERSION " over %s", fnic->name); spin_lock_irqsave(&fnic_list_lock, flags); list_add_tail(&fnic->list, &fnic_list); spin_unlock_irqrestore(&fnic_list_lock, flags); INIT_WORK(&fnic->link_work, fnic_handle_link); INIT_WORK(&fnic->frame_work, fnic_handle_frame); skb_queue_head_init(&fnic->frame_queue); skb_queue_head_init(&fnic->tx_queue); /* Enable all queues */ for (i = 0; i < fnic->raw_wq_count; i++) vnic_wq_enable(&fnic->wq[i]); for (i = 0; i < fnic->rq_count; i++) vnic_rq_enable(&fnic->rq[i]); for (i = 0; i < fnic->wq_copy_count; i++) vnic_wq_copy_enable(&fnic->wq_copy[i]); fc_fabric_login(lp); vnic_dev_enable(fnic->vdev); err = fnic_request_intr(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "Unable to request irq.\n"); goto err_out_free_exch_mgr; } for (i = 0; i < fnic->intr_count; i++) vnic_intr_unmask(&fnic->intr[i]); fnic_notify_timer_start(fnic); return 0; err_out_free_exch_mgr: fc_exch_mgr_free(lp); err_out_remove_scsi_host: fc_remove_host(lp->host); scsi_remove_host(lp->host); err_out_free_rq_buf: for (i = 0; i < fnic->rq_count; i++) vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); vnic_dev_notify_unset(fnic->vdev); err_out_free_max_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); err_out_free_dflt_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); err_out_free_ioreq_pool: mempool_destroy(fnic->io_req_pool); err_out_free_resources: fnic_free_vnic_resources(fnic); err_out_clear_intr: fnic_clear_intr_mode(fnic); err_out_dev_close: vnic_dev_close(fnic->vdev); err_out_vnic_unregister: vnic_dev_unregister(fnic->vdev); err_out_iounmap: fnic_iounmap(fnic); err_out_release_regions: pci_release_regions(pdev); err_out_disable_device: pci_disable_device(pdev); err_out_free_hba: scsi_host_put(lp->host); err_out: return err; } static void __devexit fnic_remove(struct pci_dev *pdev) { struct fnic *fnic = pci_get_drvdata(pdev); struct fc_lport *lp = fnic->lport; unsigned long flags; /* * Mark state so that the workqueue thread stops forwarding * received frames and link events to the local port. 
ISR and * other threads that can queue work items will also stop * creating work items on the fnic workqueue */ spin_lock_irqsave(&fnic->fnic_lock, flags); fnic->stop_rx_link_events = 1; spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) del_timer_sync(&fnic->notify_timer); /* * Flush the fnic event queue. After this call, there should * be no event queued for this fnic device in the workqueue */ flush_workqueue(fnic_event_queue); skb_queue_purge(&fnic->frame_queue); skb_queue_purge(&fnic->tx_queue); /* * Log off the fabric. This stops all remote ports, dns port, * logs off the fabric. This flushes all rport, disc, lport work * before returning */ fc_fabric_logoff(fnic->lport); spin_lock_irqsave(&fnic->fnic_lock, flags); fnic->in_remove = 1; spin_unlock_irqrestore(&fnic->fnic_lock, flags); fcoe_ctlr_destroy(&fnic->ctlr); fc_lport_destroy(lp); /* * This stops the fnic device, masks all interrupts. Completed * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are * cleaned up */ fnic_cleanup(fnic); BUG_ON(!skb_queue_empty(&fnic->frame_queue)); BUG_ON(!skb_queue_empty(&fnic->tx_queue)); spin_lock_irqsave(&fnic_list_lock, flags); list_del(&fnic->list); spin_unlock_irqrestore(&fnic_list_lock, flags); fc_remove_host(fnic->lport->host); scsi_remove_host(fnic->lport->host); fc_exch_mgr_free(fnic->lport); vnic_dev_notify_unset(fnic->vdev); fnic_free_intr(fnic); fnic_free_vnic_resources(fnic); fnic_clear_intr_mode(fnic); vnic_dev_close(fnic->vdev); vnic_dev_unregister(fnic->vdev); fnic_iounmap(fnic); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); scsi_host_put(lp->host); } static struct pci_driver fnic_driver = { .name = DRV_NAME, .id_table = fnic_id_table, .probe = fnic_probe, .remove = __devexit_p(fnic_remove), }; static int __init fnic_init_module(void) { size_t len; int err = 0; printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); /* Create a cache for allocation of default size sgls */ len = sizeof(struct fnic_dflt_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create ("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL); if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) { printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n"); err = -ENOMEM; goto err_create_fnic_sgl_slab_dflt; } /* Create a cache for allocation of max size sgls*/ len = sizeof(struct fnic_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL); if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); err = -ENOMEM; goto err_create_fnic_sgl_slab_max; } /* Create a cache of io_req structs for use via mempool */ fnic_io_req_cache = kmem_cache_create("fnic_io_req", sizeof(struct fnic_io_req), 0, SLAB_HWCACHE_ALIGN, NULL); if (!fnic_io_req_cache) { printk(KERN_ERR PFX "failed to create fnic io_req slab\n"); err = -ENOMEM; goto err_create_fnic_ioreq_slab; } fnic_event_queue = create_singlethread_workqueue("fnic_event_wq"); if (!fnic_event_queue) { printk(KERN_ERR PFX "fnic work queue create failed\n"); err = -ENOMEM; goto err_create_fnic_workq; } spin_lock_init(&fnic_list_lock); INIT_LIST_HEAD(&fnic_list); fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); if (!fnic_fc_transport) { printk(KERN_ERR PFX "fc_attach_transport error\n"); err = -ENOMEM; goto err_fc_transport; } /* register 
	   the driver with PCI system */
	err = pci_register_driver(&fnic_driver);
	if (err < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}
	return err;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	return err;
}

static void __exit fnic_cleanup_module(void)
{
	pci_unregister_driver(&fnic_driver);
	destroy_workqueue(fnic_event_queue);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	kmem_cache_destroy(fnic_io_req_cache);
	fc_release_transport(fnic_fc_transport);
}

module_init(fnic_init_module);
module_exit(fnic_cleanup_module);
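/*
 * Illustrative sketch, not part of the original file: fnic_probe() and
 * fnic_init_module() above both rely on the usual kernel "goto unwind"
 * error handling - every acquired resource gets a label, and the labels
 * are laid out in reverse order of acquisition, so a failure at step N
 * releases exactly the steps that already succeeded.  The helpers below
 * are hypothetical, named only to show the shape of the pattern.
 */
#if 0	/* pattern sketch only; alloc_a/alloc_b/free_a are made-up helpers */
static int example_setup(void)
{
	int err;

	err = alloc_a();		/* step 1 */
	if (err)
		goto err_out;		/* nothing to undo yet */
	err = alloc_b();		/* step 2 */
	if (err)
		goto err_free_a;	/* undo step 1 only */
	return 0;

err_free_a:
	free_a();
err_out:
	return err;
}
#endif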
gpl-2.0
samno1607/NO-IDEA
drivers/staging/iio/accel/sca3000_core.c
761
38756
/* * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * Copyright (c) 2009 Jonathan Cameron <jic23@cam.ac.uk> * * See industrialio/accels/sca3000.h for comments. */ #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/sysfs.h> #include "../iio.h" #include "../sysfs.h" #include "../ring_generic.h" #include "accel.h" #include "sca3000.h" enum sca3000_variant { d01, e02, e04, e05, }; /* Note where option modes are not defined, the chip simply does not * support any. * Other chips in the sca3000 series use i2c and are not included here. * * Some of these devices are only listed in the family data sheet and * do not actually appear to be available. */ static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = { { .name = "sca3000-d01", .scale = " 0.0073575", .temp_output = true, .measurement_mode_freq = 250, .option_mode_1 = SCA3000_OP_MODE_BYPASS, .option_mode_1_freq = 250, }, { .name = "sca3000-e02", .scale = "0.00981", .measurement_mode_freq = 125, .option_mode_1 = SCA3000_OP_MODE_NARROW, .option_mode_1_freq = 63, }, { .name = "sca3000-e04", .scale = "0.01962", .measurement_mode_freq = 100, .option_mode_1 = SCA3000_OP_MODE_NARROW, .option_mode_1_freq = 50, .option_mode_2 = SCA3000_OP_MODE_WIDE, .option_mode_2_freq = 400, }, { .name = "sca3000-e05", .scale = "0.0613125", .measurement_mode_freq = 200, .option_mode_1 = SCA3000_OP_MODE_NARROW, .option_mode_1_freq = 50, .option_mode_2 = SCA3000_OP_MODE_WIDE, .option_mode_2_freq = 400, }, }; int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val) { struct spi_transfer xfer = { .bits_per_word = 8, .len = 2, .cs_change = 1, .tx_buf = st->tx, }; struct spi_message msg; st->tx[0] = SCA3000_WRITE_REG(address); st->tx[1] = val; spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); return spi_sync(st->us, &msg); } int sca3000_read_data(struct sca3000_state *st, uint8_t reg_address_high, u8 **rx_p, int len) { int ret; struct spi_message msg; struct spi_transfer xfer = { .bits_per_word = 8, .len = len + 1, .cs_change = 1, .tx_buf = st->tx, }; *rx_p = kmalloc(len + 1, GFP_KERNEL); if (*rx_p == NULL) { ret = -ENOMEM; goto error_ret; } xfer.rx_buf = *rx_p; st->tx[0] = SCA3000_READ_REG(reg_address_high); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->us, &msg); if (ret) { dev_err(get_device(&st->us->dev), "problem reading register"); goto error_free_rx; } return 0; error_free_rx: kfree(*rx_p); error_ret: return ret; } /** * sca3000_reg_lock_on() test if the ctrl register lock is on * * Lock must be held. **/ static int sca3000_reg_lock_on(struct sca3000_state *st) { u8 *rx; int ret; ret = sca3000_read_data(st, SCA3000_REG_ADDR_STATUS, &rx, 1); if (ret < 0) return ret; ret = !(rx[1] & SCA3000_LOCKED); kfree(rx); return ret; } /** * __sca3000_unlock_reg_lock() unlock the control registers * * Note the device does not appear to support doing this in a single transfer. * This should only ever be used as part of ctrl reg read. 
* Lock must be held before calling this **/ static int __sca3000_unlock_reg_lock(struct sca3000_state *st) { struct spi_message msg; struct spi_transfer xfer[3] = { { .bits_per_word = 8, .len = 2, .cs_change = 1, .tx_buf = st->tx, }, { .bits_per_word = 8, .len = 2, .cs_change = 1, .tx_buf = st->tx + 2, }, { .bits_per_word = 8, .len = 2, .cs_change = 1, .tx_buf = st->tx + 4, }, }; st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK); st->tx[1] = 0x00; st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK); st->tx[3] = 0x50; st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK); st->tx[5] = 0xA0; spi_message_init(&msg); spi_message_add_tail(&xfer[0], &msg); spi_message_add_tail(&xfer[1], &msg); spi_message_add_tail(&xfer[2], &msg); return spi_sync(st->us, &msg); } /** * sca3000_write_ctrl_reg() write to a lock protect ctrl register * @sel: selects which registers we wish to write to * @val: the value to be written * * Certain control registers are protected against overwriting by the lock * register and use a shared write address. This function allows writing of * these registers. * Lock must be held. **/ static int sca3000_write_ctrl_reg(struct sca3000_state *st, uint8_t sel, uint8_t val) { int ret; ret = sca3000_reg_lock_on(st); if (ret < 0) goto error_ret; if (ret) { ret = __sca3000_unlock_reg_lock(st); if (ret) goto error_ret; } /* Set the control select register */ ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, sel); if (ret) goto error_ret; /* Write the actual value into the register */ ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_DATA, val); error_ret: return ret; } /* Crucial that lock is called before calling this */ /** * sca3000_read_ctrl_reg() read from lock protected control register. * * Lock must be held. **/ static int sca3000_read_ctrl_reg(struct sca3000_state *st, u8 ctrl_reg, u8 **rx_p) { int ret; ret = sca3000_reg_lock_on(st); if (ret < 0) goto error_ret; if (ret) { ret = __sca3000_unlock_reg_lock(st); if (ret) goto error_ret; } /* Set the control select register */ ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, ctrl_reg); if (ret) goto error_ret; ret = sca3000_read_data(st, SCA3000_REG_ADDR_CTRL_DATA, rx_p, 1); error_ret: return ret; } #ifdef SCA3000_DEBUG /** * sca3000_check_status() check the status register * * Only used for debugging purposes **/ static int sca3000_check_status(struct device *dev) { u8 *rx; int ret; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; mutex_lock(&st->lock); ret = sca3000_read_data(st, SCA3000_REG_ADDR_STATUS, &rx, 1); if (ret < 0) goto error_ret; if (rx[1] & SCA3000_EEPROM_CS_ERROR) dev_err(dev, "eeprom error\n"); if (rx[1] & SCA3000_SPI_FRAME_ERROR) dev_err(dev, "Previous SPI Frame was corrupt\n"); kfree(rx); error_ret: mutex_unlock(&st->lock); return ret; } #endif /* SCA3000_DEBUG */ /** * sca3000_read_13bit_signed() sysfs interface to read 13 bit signed registers * * These are described as signed 12 bit on the data sheet, which appears * to be a conventional 2's complement 13 bit. 
**/ static ssize_t sca3000_read_13bit_signed(struct device *dev, struct device_attribute *attr, char *buf) { int len = 0, ret; int val; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); u8 *rx; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; mutex_lock(&st->lock); ret = sca3000_read_data(st, this_attr->address, &rx, 2); if (ret < 0) goto error_ret; val = sca3000_13bit_convert(rx[1], rx[2]); len += sprintf(buf + len, "%d\n", val); kfree(rx); error_ret: mutex_unlock(&st->lock); return ret ? ret : len; } static ssize_t sca3000_show_scale(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct sca3000_state *st = dev_info->dev_data; return sprintf(buf, "%s\n", st->info->scale); } static ssize_t sca3000_show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct sca3000_state *st = dev_info->dev_data; return sprintf(buf, "%s\n", st->info->name); } /** * sca3000_show_reg() - sysfs interface to read the chip revision number **/ static ssize_t sca3000_show_rev(struct device *dev, struct device_attribute *attr, char *buf) { int len = 0, ret; struct iio_dev *dev_info = dev_get_drvdata(dev); struct sca3000_state *st = dev_info->dev_data; u8 *rx; mutex_lock(&st->lock); ret = sca3000_read_data(st, SCA3000_REG_ADDR_REVID, &rx, 1); if (ret < 0) goto error_ret; len += sprintf(buf + len, "major=%d, minor=%d\n", rx[1] & SCA3000_REVID_MAJOR_MASK, rx[1] & SCA3000_REVID_MINOR_MASK); kfree(rx); error_ret: mutex_unlock(&st->lock); return ret ? ret : len; } /** * sca3000_show_available_measurement_modes() display available modes * * This is all read from chip specific data in the driver. Not all * of the sca3000 series support modes other than normal. 
**/ static ssize_t sca3000_show_available_measurement_modes(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct sca3000_state *st = dev_info->dev_data; int len = 0; len += sprintf(buf + len, "0 - normal mode"); switch (st->info->option_mode_1) { case SCA3000_OP_MODE_NARROW: len += sprintf(buf + len, ", 1 - narrow mode"); break; case SCA3000_OP_MODE_BYPASS: len += sprintf(buf + len, ", 1 - bypass mode"); break; }; switch (st->info->option_mode_2) { case SCA3000_OP_MODE_WIDE: len += sprintf(buf + len, ", 2 - wide mode"); break; } /* always supported */ len += sprintf(buf + len, " 3 - motion detection\n"); return len; } /** * sca3000_show_measurmenet_mode() sysfs read of current mode **/ static ssize_t sca3000_show_measurement_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct sca3000_state *st = dev_info->dev_data; int len = 0, ret; u8 *rx; mutex_lock(&st->lock); ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1); if (ret) goto error_ret; /* mask bottom 2 bits - only ones that are relevant */ rx[1] &= 0x03; switch (rx[1]) { case SCA3000_MEAS_MODE_NORMAL: len += sprintf(buf + len, "0 - normal mode\n"); break; case SCA3000_MEAS_MODE_MOT_DET: len += sprintf(buf + len, "3 - motion detection\n"); break; case SCA3000_MEAS_MODE_OP_1: switch (st->info->option_mode_1) { case SCA3000_OP_MODE_NARROW: len += sprintf(buf + len, "1 - narrow mode\n"); break; case SCA3000_OP_MODE_BYPASS: len += sprintf(buf + len, "1 - bypass mode\n"); break; }; break; case SCA3000_MEAS_MODE_OP_2: switch (st->info->option_mode_2) { case SCA3000_OP_MODE_WIDE: len += sprintf(buf + len, "2 - wide mode\n"); break; } break; }; error_ret: mutex_unlock(&st->lock); return ret ? ret : len; } /** * sca3000_store_measurement_mode() set the current mode **/ static ssize_t sca3000_store_measurement_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *dev_info = dev_get_drvdata(dev); struct sca3000_state *st = dev_info->dev_data; int ret; u8 *rx; int mask = 0x03; long val; mutex_lock(&st->lock); ret = strict_strtol(buf, 10, &val); if (ret) goto error_ret; ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1); if (ret) goto error_ret; rx[1] &= ~mask; rx[1] |= (val & mask); ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, rx[1]); if (ret) goto error_free_rx; mutex_unlock(&st->lock); return len; error_free_rx: kfree(rx); error_ret: mutex_unlock(&st->lock); return ret; } /* Not even vaguely standard attributes so defined here rather than * in the relevant IIO core headers */ static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO, sca3000_show_available_measurement_modes, NULL, 0); static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR, sca3000_show_measurement_mode, sca3000_store_measurement_mode, 0); /* More standard attributes */ static IIO_DEV_ATTR_NAME(sca3000_show_name); static IIO_DEV_ATTR_REV(sca3000_show_rev); static IIO_DEVICE_ATTR(accel_scale, S_IRUGO, sca3000_show_scale, NULL, 0); static IIO_DEV_ATTR_ACCEL_X(sca3000_read_13bit_signed, SCA3000_REG_ADDR_X_MSB); static IIO_DEV_ATTR_ACCEL_Y(sca3000_read_13bit_signed, SCA3000_REG_ADDR_Y_MSB); static IIO_DEV_ATTR_ACCEL_Z(sca3000_read_13bit_signed, SCA3000_REG_ADDR_Z_MSB); /** * sca3000_read_av_freq() sysfs function to get available frequencies * * The later modes are only relevant to the ring buffer - and depend on current * mode. 
Note that data sheet gives rather wide tolerances for these so integer * division will give good enough answer and not all chips have them specified * at all. **/ static ssize_t sca3000_read_av_freq(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; int len = 0, ret; u8 *rx; mutex_lock(&st->lock); ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1); mutex_unlock(&st->lock); if (ret) goto error_ret; rx[1] &= 0x03; switch (rx[1]) { case SCA3000_MEAS_MODE_NORMAL: len += sprintf(buf + len, "%d %d %d\n", st->info->measurement_mode_freq, st->info->measurement_mode_freq/2, st->info->measurement_mode_freq/4); break; case SCA3000_MEAS_MODE_OP_1: len += sprintf(buf + len, "%d %d %d\n", st->info->option_mode_1_freq, st->info->option_mode_1_freq/2, st->info->option_mode_1_freq/4); break; case SCA3000_MEAS_MODE_OP_2: len += sprintf(buf + len, "%d %d %d\n", st->info->option_mode_2_freq, st->info->option_mode_2_freq/2, st->info->option_mode_2_freq/4); break; }; kfree(rx); return len; error_ret: return ret; } /** * __sca3000_get_base_frequency() obtain mode specific base frequency * * lock must be held **/ static inline int __sca3000_get_base_freq(struct sca3000_state *st, const struct sca3000_chip_info *info, int *base_freq) { int ret; u8 *rx; ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1); if (ret) goto error_ret; switch (0x03 & rx[1]) { case SCA3000_MEAS_MODE_NORMAL: *base_freq = info->measurement_mode_freq; break; case SCA3000_MEAS_MODE_OP_1: *base_freq = info->option_mode_1_freq; break; case SCA3000_MEAS_MODE_OP_2: *base_freq = info->option_mode_2_freq; break; }; kfree(rx); error_ret: return ret; } /** * sca3000_read_frequency() sysfs interface to get the current frequency **/ static ssize_t sca3000_read_frequency(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; int ret, len = 0, base_freq = 0; u8 *rx; mutex_lock(&st->lock); ret = __sca3000_get_base_freq(st, st->info, &base_freq); if (ret) goto error_ret_mut; ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, &rx); mutex_unlock(&st->lock); if (ret) goto error_ret; if (base_freq > 0) switch (rx[1]&0x03) { case 0x00: case 0x03: len = sprintf(buf, "%d\n", base_freq); break; case 0x01: len = sprintf(buf, "%d\n", base_freq/2); break; case 0x02: len = sprintf(buf, "%d\n", base_freq/4); break; }; kfree(rx); return len; error_ret_mut: mutex_unlock(&st->lock); error_ret: return ret; } /** * sca3000_set_frequency() sysfs interface to set the current frequency **/ static ssize_t sca3000_set_frequency(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; int ret, base_freq = 0; u8 *rx; long val; ret = strict_strtol(buf, 10, &val); if (ret) return ret; mutex_lock(&st->lock); /* What mode are we in? 
*/ ret = __sca3000_get_base_freq(st, st->info, &base_freq); if (ret) goto error_free_lock; ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, &rx); if (ret) goto error_free_lock; /* clear the bits */ rx[1] &= ~0x03; if (val == base_freq/2) { rx[1] |= SCA3000_OUT_CTRL_BUF_DIV_2; } else if (val == base_freq/4) { rx[1] |= SCA3000_OUT_CTRL_BUF_DIV_4; } else if (val != base_freq) { ret = -EINVAL; goto error_free_lock; } ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, rx[1]); error_free_lock: mutex_unlock(&st->lock); return ret ? ret : len; } /* Should only really be registered if ring buffer support is compiled in. * Does no harm however and doing it right would add a fair bit of complexity */ static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq); static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, sca3000_read_frequency, sca3000_set_frequency); /** * sca3000_read_temp() sysfs interface to get the temperature when available * * The alignment of data in here is downright odd. See data sheet. * Converting this into a meaningful value is left to inline functions in * userspace part of header. **/ static ssize_t sca3000_read_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; int len = 0, ret; int val; u8 *rx; ret = sca3000_read_data(st, SCA3000_REG_ADDR_TEMP_MSB, &rx, 2); if (ret < 0) goto error_ret; val = ((rx[1]&0x3F) << 3) | ((rx[2] & 0xE0) >> 5); len += sprintf(buf + len, "%d\n", val); kfree(rx); return len; error_ret: return ret; } static IIO_DEV_ATTR_TEMP_RAW(sca3000_read_temp); static IIO_CONST_ATTR(temp_scale, "0.555556"); static IIO_CONST_ATTR(temp_offset, "-214.6"); /** * sca3000_show_thresh() sysfs query of a threshold **/ static ssize_t sca3000_show_thresh(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int len = 0, ret; u8 *rx; mutex_lock(&st->lock); ret = sca3000_read_ctrl_reg(st, this_attr->address, &rx); mutex_unlock(&st->lock); if (ret) return ret; len += sprintf(buf + len, "%d\n", rx[1]); kfree(rx); return len; } /** * sca3000_write_thresh() sysfs control of threshold **/ static ssize_t sca3000_write_thresh(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret; long val; ret = strict_strtol(buf, 10, &val); if (ret) return ret; mutex_lock(&st->lock); ret = sca3000_write_ctrl_reg(st, this_attr->address, val); mutex_unlock(&st->lock); return ret ? 
ret : len; } static IIO_DEVICE_ATTR(accel_x_mag_either_rising_value, S_IRUGO | S_IWUSR, sca3000_show_thresh, sca3000_write_thresh, SCA3000_REG_CTRL_SEL_MD_X_TH); static IIO_DEVICE_ATTR(accel_y_mag_either_rising_value, S_IRUGO | S_IWUSR, sca3000_show_thresh, sca3000_write_thresh, SCA3000_REG_CTRL_SEL_MD_Y_TH); static IIO_DEVICE_ATTR(accel_z_mag_either_rising_value, S_IRUGO | S_IWUSR, sca3000_show_thresh, sca3000_write_thresh, SCA3000_REG_CTRL_SEL_MD_Z_TH); static struct attribute *sca3000_attributes[] = { &iio_dev_attr_name.dev_attr.attr, &iio_dev_attr_revision.dev_attr.attr, &iio_dev_attr_accel_scale.dev_attr.attr, &iio_dev_attr_accel_x_raw.dev_attr.attr, &iio_dev_attr_accel_y_raw.dev_attr.attr, &iio_dev_attr_accel_z_raw.dev_attr.attr, &iio_dev_attr_measurement_mode_available.dev_attr.attr, &iio_dev_attr_measurement_mode.dev_attr.attr, &iio_dev_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_sampling_frequency.dev_attr.attr, NULL, }; static struct attribute *sca3000_attributes_with_temp[] = { &iio_dev_attr_name.dev_attr.attr, &iio_dev_attr_revision.dev_attr.attr, &iio_dev_attr_accel_scale.dev_attr.attr, &iio_dev_attr_accel_x_raw.dev_attr.attr, &iio_dev_attr_accel_y_raw.dev_attr.attr, &iio_dev_attr_accel_z_raw.dev_attr.attr, &iio_dev_attr_measurement_mode_available.dev_attr.attr, &iio_dev_attr_measurement_mode.dev_attr.attr, &iio_dev_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_sampling_frequency.dev_attr.attr, /* Only present if temp sensor is */ &iio_dev_attr_temp_raw.dev_attr.attr, &iio_const_attr_temp_offset.dev_attr.attr, &iio_const_attr_temp_scale.dev_attr.attr, NULL, }; static const struct attribute_group sca3000_attribute_group = { .attrs = sca3000_attributes, }; static const struct attribute_group sca3000_attribute_group_with_temp = { .attrs = sca3000_attributes_with_temp, }; /* RING RELATED interrupt handler */ /* depending on event, push to the ring buffer event chrdev or the event one */ /** * sca3000_interrupt_handler_bh() - handling ring and non ring events * * This function is complicated by the fact that the devices can signify ring * and non ring events via the same interrupt line and they can only * be distinguished via a read of the relevant status register. **/ static void sca3000_interrupt_handler_bh(struct work_struct *work_s) { struct sca3000_state *st = container_of(work_s, struct sca3000_state, interrupt_handler_ws); u8 *rx; int ret; /* Could lead if badly timed to an extra read of status reg, * but ensures no interrupt is missed. 
*/ enable_irq(st->us->irq); mutex_lock(&st->lock); ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_STATUS, &rx, 1); mutex_unlock(&st->lock); if (ret) goto done; sca3000_ring_int_process(rx[1], st->indio_dev->ring); if (rx[1] & SCA3000_INT_STATUS_FREE_FALL) iio_push_event(st->indio_dev, 0, IIO_EVENT_CODE_FREE_FALL, st->last_timestamp); if (rx[1] & SCA3000_INT_STATUS_Y_TRIGGER) iio_push_event(st->indio_dev, 0, IIO_EVENT_CODE_ACCEL_Y_HIGH, st->last_timestamp); if (rx[1] & SCA3000_INT_STATUS_X_TRIGGER) iio_push_event(st->indio_dev, 0, IIO_EVENT_CODE_ACCEL_X_HIGH, st->last_timestamp); if (rx[1] & SCA3000_INT_STATUS_Z_TRIGGER) iio_push_event(st->indio_dev, 0, IIO_EVENT_CODE_ACCEL_Z_HIGH, st->last_timestamp); done: kfree(rx); return; } /** * sca3000_handler_th() handles all interrupt events from device * * These devices deploy unified interrupt status registers meaning * all interrupts must be handled together **/ static int sca3000_handler_th(struct iio_dev *dev_info, int index, s64 timestamp, int no_test) { struct sca3000_state *st = dev_info->dev_data; st->last_timestamp = timestamp; schedule_work(&st->interrupt_handler_ws); return 0; } /** * sca3000_query_mo_det() is motion detection enabled for this axis * * First queries if motion detection is enabled and then if this axis is * on. **/ static ssize_t sca3000_query_mo_det(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev->parent); struct sca3000_state *st = indio_dev->dev_data; struct iio_event_attr *this_attr = to_iio_event_attr(attr); int ret, len = 0; u8 *rx; u8 protect_mask = 0x03; /* read current value of mode register */ mutex_lock(&st->lock); ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1); if (ret) goto error_ret; if ((rx[1]&protect_mask) != SCA3000_MEAS_MODE_MOT_DET) len += sprintf(buf + len, "0\n"); else { kfree(rx); ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL, &rx); if (ret) goto error_ret; /* only supporting logical or's for now */ len += sprintf(buf + len, "%d\n", (rx[1] & this_attr->mask) ? 1 : 0); } kfree(rx); error_ret: mutex_unlock(&st->lock); return ret ? ret : len; } /** * sca3000_query_free_fall_mode() is free fall mode enabled **/ static ssize_t sca3000_query_free_fall_mode(struct device *dev, struct device_attribute *attr, char *buf) { int ret, len; u8 *rx; struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; mutex_lock(&st->lock); ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1); mutex_unlock(&st->lock); if (ret) return ret; len = sprintf(buf, "%d\n", !!(rx[1] & SCA3000_FREE_FALL_DETECT)); kfree(rx); return len; } /** * sca3000_query_ring_int() is the hardware ring status interrupt enabled **/ static ssize_t sca3000_query_ring_int(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_event_attr *this_attr = to_iio_event_attr(attr); int ret, len; u8 *rx; struct iio_dev *indio_dev = dev_get_drvdata(dev->parent); struct sca3000_state *st = indio_dev->dev_data; mutex_lock(&st->lock); ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_MASK, &rx, 1); mutex_unlock(&st->lock); if (ret) return ret; len = sprintf(buf, "%d\n", (rx[1] & this_attr->mask) ? 
1 : 0); kfree(rx); return len; } /** * sca3000_set_ring_int() set state of ring status interrupt **/ static ssize_t sca3000_set_ring_int(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev->parent); struct sca3000_state *st = indio_dev->dev_data; struct iio_event_attr *this_attr = to_iio_event_attr(attr); long val; int ret; u8 *rx; mutex_lock(&st->lock); ret = strict_strtol(buf, 10, &val); if (ret) goto error_ret; ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_MASK, &rx, 1); if (ret) goto error_ret; if (val) ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK, rx[1] | this_attr->mask); else ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK, rx[1] & ~this_attr->mask); kfree(rx); error_ret: mutex_unlock(&st->lock); return ret ? ret : len; } /** * sca3000_set_free_fall_mode() simple on off control for free fall int * * In these chips the free fall detector should send an interrupt if * the device falls more than 25cm. This has not been tested due * to fragile wiring. **/ static ssize_t sca3000_set_free_fall_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct sca3000_state *st = indio_dev->dev_data; long val; int ret; u8 *rx; u8 protect_mask = SCA3000_FREE_FALL_DETECT; mutex_lock(&st->lock); ret = strict_strtol(buf, 10, &val); if (ret) goto error_ret; /* read current value of mode register */ ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1); if (ret) goto error_ret; /*if off and should be on*/ if (val && !(rx[1] & protect_mask)) ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, (rx[1] | SCA3000_FREE_FALL_DETECT)); /* if on and should be off */ else if (!val && (rx[1]&protect_mask)) ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, (rx[1] & ~protect_mask)); kfree(rx); error_ret: mutex_unlock(&st->lock); return ret ? ret : len; } /** * sca3000_set_mo_det() simple on off control for motion detector * * This is a per axis control, but enabling any will result in the * motion detector unit being enabled. * N.B. enabling motion detector stops normal data acquisition. * There is a complexity in knowing which mode to return to when * this mode is disabled. Currently normal mode is assumed. 
**/ static ssize_t sca3000_set_mo_det(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev->parent); struct sca3000_state *st = indio_dev->dev_data; struct iio_event_attr *this_attr = to_iio_event_attr(attr); long val; int ret; u8 *rx; u8 protect_mask = 0x03; ret = strict_strtol(buf, 10, &val); if (ret) return ret; mutex_lock(&st->lock); /* First read the motion detector config to find out if * this axis is on*/ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL, &rx); if (ret) goto exit_point; /* Off and should be on */ if (val && !(rx[1] & this_attr->mask)) { ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL, rx[1] | this_attr->mask); if (ret) goto exit_point_free_rx; st->mo_det_use_count++; } else if (!val && (rx[1]&this_attr->mask)) { ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL, rx[1] & ~(this_attr->mask)); if (ret) goto exit_point_free_rx; st->mo_det_use_count--; } else /* relies on clean state for device on boot */ goto exit_point_free_rx; kfree(rx); /* read current value of mode register */ ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1); if (ret) goto exit_point; /*if off and should be on*/ if ((st->mo_det_use_count) && ((rx[1]&protect_mask) != SCA3000_MEAS_MODE_MOT_DET)) ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, (rx[1] & ~protect_mask) | SCA3000_MEAS_MODE_MOT_DET); /* if on and should be off */ else if (!(st->mo_det_use_count) && ((rx[1]&protect_mask) == SCA3000_MEAS_MODE_MOT_DET)) ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, (rx[1] & ~protect_mask)); exit_point_free_rx: kfree(rx); exit_point: mutex_unlock(&st->lock); return ret ? ret : len; } /* Shared event handler for all events as single event status register */ IIO_EVENT_SH(all, &sca3000_handler_th); /* Free fall detector related event attribute */ IIO_EVENT_ATTR_FREE_FALL_DETECT_SH(iio_event_all, sca3000_query_free_fall_mode, sca3000_set_free_fall_mode, 0) /* Motion detector related event attributes */ IIO_EVENT_ATTR_SH(accel_x_mag_either_rising_en, iio_event_all, sca3000_query_mo_det, sca3000_set_mo_det, SCA3000_MD_CTRL_OR_X); IIO_EVENT_ATTR_SH(accel_y_mag_either_rising_en, iio_event_all, sca3000_query_mo_det, sca3000_set_mo_det, SCA3000_MD_CTRL_OR_Y); IIO_EVENT_ATTR_SH(accel_z_mag_either_rising_en, iio_event_all, sca3000_query_mo_det, sca3000_set_mo_det, SCA3000_MD_CTRL_OR_Z); /* Hardware ring buffer related event attributes */ IIO_EVENT_ATTR_RING_50_FULL_SH(iio_event_all, sca3000_query_ring_int, sca3000_set_ring_int, SCA3000_INT_MASK_RING_HALF); IIO_EVENT_ATTR_RING_75_FULL_SH(iio_event_all, sca3000_query_ring_int, sca3000_set_ring_int, SCA3000_INT_MASK_RING_THREE_QUARTER); static struct attribute *sca3000_event_attributes[] = { &iio_event_attr_free_fall.dev_attr.attr, &iio_event_attr_accel_x_mag_either_rising_en.dev_attr.attr, &iio_event_attr_accel_y_mag_either_rising_en.dev_attr.attr, &iio_event_attr_accel_z_mag_either_rising_en.dev_attr.attr, &iio_event_attr_ring_50_full.dev_attr.attr, &iio_event_attr_ring_75_full.dev_attr.attr, &iio_dev_attr_accel_x_mag_either_rising_value.dev_attr.attr, &iio_dev_attr_accel_y_mag_either_rising_value.dev_attr.attr, &iio_dev_attr_accel_z_mag_either_rising_value.dev_attr.attr, NULL, }; static struct attribute_group sca3000_event_attribute_group = { .attrs = sca3000_event_attributes, }; /** * sca3000_clean_setup() get the device into a predictable state * * Devices use flash memory to store many of the register values * and hence can come up in 
somewhat unpredictable states. * Hence reset everything on driver load. **/ static int sca3000_clean_setup(struct sca3000_state *st) { int ret; u8 *rx; mutex_lock(&st->lock); /* Ensure all interrupts have been acknowledged */ ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_STATUS, &rx, 1); if (ret) goto error_ret; kfree(rx); /* Turn off all motion detection channels */ ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL, &rx); if (ret) goto error_ret; ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL, rx[1] & SCA3000_MD_CTRL_PROT_MASK); kfree(rx); if (ret) goto error_ret; /* Disable ring buffer */ sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, &rx); /* Frequency of ring buffer sampling deliberately restricted to make * debugging easier - add control of this later */ ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL, (rx[1] & SCA3000_OUT_CTRL_PROT_MASK) | SCA3000_OUT_CTRL_BUF_X_EN | SCA3000_OUT_CTRL_BUF_Y_EN | SCA3000_OUT_CTRL_BUF_Z_EN | SCA3000_OUT_CTRL_BUF_DIV_4); kfree(rx); if (ret) goto error_ret; /* Enable interrupts, relevant to mode and set up as active low */ ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_MASK, &rx, 1); if (ret) goto error_ret; ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK, (rx[1] & SCA3000_INT_MASK_PROT_MASK) | SCA3000_INT_MASK_ACTIVE_LOW); kfree(rx); if (ret) goto error_ret; /* Select normal measurement mode, free fall off, ring off */ /* Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5 * as that occurs in one of the example on the datasheet */ ret = sca3000_read_data(st, SCA3000_REG_ADDR_MODE, &rx, 1); if (ret) goto error_ret; ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, (rx[1] & SCA3000_MODE_PROT_MASK)); kfree(rx); st->bpse = 11; error_ret: mutex_unlock(&st->lock); return ret; } static int __devinit __sca3000_probe(struct spi_device *spi, enum sca3000_variant variant) { int ret, regdone = 0; struct sca3000_state *st; st = kzalloc(sizeof(struct sca3000_state), GFP_KERNEL); if (st == NULL) { ret = -ENOMEM; goto error_ret; } spi_set_drvdata(spi, st); st->tx = kmalloc(sizeof(*st->tx)*6, GFP_KERNEL); if (st->tx == NULL) { ret = -ENOMEM; goto error_clear_st; } st->rx = kmalloc(sizeof(*st->rx)*3, GFP_KERNEL); if (st->rx == NULL) { ret = -ENOMEM; goto error_free_tx; } st->us = spi; mutex_init(&st->lock); st->info = &sca3000_spi_chip_info_tbl[variant]; st->indio_dev = iio_allocate_device(); if (st->indio_dev == NULL) { ret = -ENOMEM; goto error_free_rx; } st->indio_dev->dev.parent = &spi->dev; st->indio_dev->num_interrupt_lines = 1; st->indio_dev->event_attrs = &sca3000_event_attribute_group; if (st->info->temp_output) st->indio_dev->attrs = &sca3000_attribute_group_with_temp; else st->indio_dev->attrs = &sca3000_attribute_group; st->indio_dev->dev_data = (void *)(st); st->indio_dev->modes = INDIO_DIRECT_MODE; sca3000_configure_ring(st->indio_dev); ret = iio_device_register(st->indio_dev); if (ret < 0) goto error_free_dev; regdone = 1; ret = iio_ring_buffer_register(st->indio_dev->ring, 0); if (ret < 0) goto error_unregister_dev; if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) { INIT_WORK(&st->interrupt_handler_ws, sca3000_interrupt_handler_bh); ret = iio_register_interrupt_line(spi->irq, st->indio_dev, 0, IRQF_TRIGGER_FALLING, "sca3000"); if (ret) goto error_unregister_ring; /* RFC * Probably a common situation. All interrupts need an ack * and there is only one handler so the complicated list system * is overkill. At very least a simpler registration method * might be worthwhile. 
*/ iio_add_event_to_list( iio_event_attr_accel_z_mag_either_rising_en.listel, &st->indio_dev ->interrupts[0]->ev_list); } sca3000_register_ring_funcs(st->indio_dev); ret = sca3000_clean_setup(st); if (ret) goto error_unregister_interrupt_line; return 0; error_unregister_interrupt_line: if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) iio_unregister_interrupt_line(st->indio_dev, 0); error_unregister_ring: iio_ring_buffer_unregister(st->indio_dev->ring); error_unregister_dev: error_free_dev: if (regdone) iio_device_unregister(st->indio_dev); else iio_free_device(st->indio_dev); error_free_rx: kfree(st->rx); error_free_tx: kfree(st->tx); error_clear_st: kfree(st); error_ret: return ret; } static int sca3000_stop_all_interrupts(struct sca3000_state *st) { int ret; u8 *rx; mutex_lock(&st->lock); ret = sca3000_read_data(st, SCA3000_REG_ADDR_INT_MASK, &rx, 1); if (ret) goto error_ret; ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK, (rx[1] & ~(SCA3000_INT_MASK_RING_THREE_QUARTER | SCA3000_INT_MASK_RING_HALF | SCA3000_INT_MASK_ALL_INTS))); error_ret: kfree(rx); return ret; } static int sca3000_remove(struct spi_device *spi) { struct sca3000_state *st = spi_get_drvdata(spi); struct iio_dev *indio_dev = st->indio_dev; int ret; /* Must ensure no interrupts can be generated after this!*/ ret = sca3000_stop_all_interrupts(st); if (ret) return ret; if (spi->irq && gpio_is_valid(irq_to_gpio(spi->irq)) > 0) iio_unregister_interrupt_line(indio_dev, 0); iio_ring_buffer_unregister(indio_dev->ring); sca3000_unconfigure_ring(indio_dev); iio_device_unregister(indio_dev); kfree(st->tx); kfree(st->rx); kfree(st); return 0; } /* These macros save on an awful lot of repeated code */ #define SCA3000_VARIANT_PROBE(_name) \ static int __devinit \ sca3000_##_name##_probe(struct spi_device *spi) \ { \ return __sca3000_probe(spi, _name); \ } #define SCA3000_VARIANT_SPI_DRIVER(_name) \ struct spi_driver sca3000_##_name##_driver = { \ .driver = { \ .name = "sca3000_" #_name, \ .owner = THIS_MODULE, \ }, \ .probe = sca3000_##_name##_probe, \ .remove = __devexit_p(sca3000_remove), \ } SCA3000_VARIANT_PROBE(d01); static SCA3000_VARIANT_SPI_DRIVER(d01); SCA3000_VARIANT_PROBE(e02); static SCA3000_VARIANT_SPI_DRIVER(e02); SCA3000_VARIANT_PROBE(e04); static SCA3000_VARIANT_SPI_DRIVER(e04); SCA3000_VARIANT_PROBE(e05); static SCA3000_VARIANT_SPI_DRIVER(e05); static __init int sca3000_init(void) { int ret; ret = spi_register_driver(&sca3000_d01_driver); if (ret) goto error_ret; ret = spi_register_driver(&sca3000_e02_driver); if (ret) goto error_unreg_d01; ret = spi_register_driver(&sca3000_e04_driver); if (ret) goto error_unreg_e02; ret = spi_register_driver(&sca3000_e05_driver); if (ret) goto error_unreg_e04; return 0; error_unreg_e04: spi_unregister_driver(&sca3000_e04_driver); error_unreg_e02: spi_unregister_driver(&sca3000_e02_driver); error_unreg_d01: spi_unregister_driver(&sca3000_d01_driver); error_ret: return ret; } static __exit void sca3000_exit(void) { spi_unregister_driver(&sca3000_e05_driver); spi_unregister_driver(&sca3000_e04_driver); spi_unregister_driver(&sca3000_e02_driver); spi_unregister_driver(&sca3000_d01_driver); } module_init(sca3000_init); module_exit(sca3000_exit); MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>"); MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
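A minimal userspace sketch of how the raw value produced by sca3000_read_temp() above can be turned into degrees Celsius, using the temp_offset (-214.6) and temp_scale (0.555556) constants the driver exports. The bit packing mirrors the read in sca3000_read_temp(); the (raw + offset) * scale ordering is the usual IIO convention and is an assumption here, as are the example register bytes.

/*
 * Hypothetical helper: combine the two raw SCA3000 temperature bytes
 * and convert to degrees C with the driver's scale/offset attributes.
 */
#include <stdio.h>

static int sca3000_temp_raw(unsigned char msb, unsigned char lsb)
{
	/* 9-bit value: 6 bits from the MSB register, top 3 bits of the LSB */
	return ((msb & 0x3F) << 3) | ((lsb & 0xE0) >> 5);
}

static double sca3000_temp_celsius(int raw)
{
	const double offset = -214.6;	/* temp_offset attribute */
	const double scale  = 0.555556;	/* temp_scale attribute  */

	/* assumed IIO convention: processed = (raw + offset) * scale */
	return (raw + offset) * scale;
}

int main(void)
{
	/* example register contents, made up for illustration */
	int raw = sca3000_temp_raw(0x2A, 0x40);

	printf("raw=%d temp=%.1f C\n", raw, sca3000_temp_celsius(raw));
	return 0;
}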
MrSGA/bricked-tenderloin
arch/avr32/mm/cache.c
1785
3833
/* * Copyright (C) 2004-2006 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/highmem.h> #include <linux/unistd.h> #include <asm/cacheflush.h> #include <asm/cachectl.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/syscalls.h> /* * If you attempt to flush anything more than this, you need superuser * privileges. The value is completely arbitrary. */ #define CACHEFLUSH_MAX_LEN 1024 void invalidate_dcache_region(void *start, size_t size) { unsigned long v, begin, end, linesz, mask; linesz = boot_cpu_data.dcache.linesz; mask = linesz - 1; /* when first and/or last cachelines are shared, flush them * instead of invalidating ... never discard valid data! */ begin = (unsigned long)start; end = begin + size; if (begin & mask) { flush_dcache_line(start); begin += linesz; } if (end & mask) { flush_dcache_line((void *)end); end &= ~mask; } /* remaining cachelines only need invalidation */ for (v = begin; v < end; v += linesz) invalidate_dcache_line((void *)v); flush_write_buffer(); } void clean_dcache_region(void *start, size_t size) { unsigned long v, begin, end, linesz; linesz = boot_cpu_data.dcache.linesz; begin = (unsigned long)start & ~(linesz - 1); end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); for (v = begin; v < end; v += linesz) clean_dcache_line((void *)v); flush_write_buffer(); } void flush_dcache_region(void *start, size_t size) { unsigned long v, begin, end, linesz; linesz = boot_cpu_data.dcache.linesz; begin = (unsigned long)start & ~(linesz - 1); end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); for (v = begin; v < end; v += linesz) flush_dcache_line((void *)v); flush_write_buffer(); } void invalidate_icache_region(void *start, size_t size) { unsigned long v, begin, end, linesz; linesz = boot_cpu_data.icache.linesz; begin = (unsigned long)start & ~(linesz - 1); end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1); for (v = begin; v < end; v += linesz) invalidate_icache_line((void *)v); } static inline void __flush_icache_range(unsigned long start, unsigned long end) { unsigned long v, linesz; linesz = boot_cpu_data.dcache.linesz; for (v = start; v < end; v += linesz) { clean_dcache_line((void *)v); invalidate_icache_line((void *)v); } flush_write_buffer(); } /* * This one is called after a module has been loaded. */ void flush_icache_range(unsigned long start, unsigned long end) { unsigned long linesz; linesz = boot_cpu_data.dcache.linesz; __flush_icache_range(start & ~(linesz - 1), (end + linesz - 1) & ~(linesz - 1)); } /* * This one is called from do_no_page(), do_swap_page() and install_page(). 
*/ void flush_icache_page(struct vm_area_struct *vma, struct page *page) { if (vma->vm_flags & VM_EXEC) { void *v = page_address(page); __flush_icache_range((unsigned long)v, (unsigned long)v + PAGE_SIZE); } } asmlinkage int sys_cacheflush(int operation, void __user *addr, size_t len) { int ret; if (len > CACHEFLUSH_MAX_LEN) { ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out; } ret = -EFAULT; if (!access_ok(VERIFY_WRITE, addr, len)) goto out; switch (operation) { case CACHE_IFLUSH: flush_icache_range((unsigned long)addr, (unsigned long)addr + len); ret = 0; break; default: ret = -EINVAL; } out: return ret; } void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) { memcpy(dst, src, len); if (vma->vm_flags & VM_EXEC) flush_icache_range((unsigned long)dst, (unsigned long)dst + len); }
gpl-2.0
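The region routines in cache.c above all round the start address down and the end address up to a cache-line boundary before walking the range line by line. A small standalone sketch of that arithmetic follows; the 32-byte line size and addresses are example values only, not something the file specifies.

/*
 * Round an arbitrary (start, size) region out to whole cache lines,
 * as clean_dcache_region()/flush_dcache_region() above do.
 * linesz must be a power of two.
 */
#include <stdio.h>

static void region_to_lines(unsigned long start, unsigned long size,
			    unsigned long linesz,
			    unsigned long *begin, unsigned long *end)
{
	unsigned long mask = linesz - 1;

	*begin = start & ~mask;			/* round down to a line */
	*end   = (start + size + mask) & ~mask;	/* round up to a line   */
}

int main(void)
{
	unsigned long begin, end;

	/* example only: a 0x30-byte buffer at 0x1005 with 32-byte lines */
	region_to_lines(0x1005, 0x30, 32, &begin, &end);
	printf("begin=0x%lx end=0x%lx lines=%lu\n",
	       begin, end, (end - begin) / 32);
	return 0;
}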
kirananto/RaZorKernel
arch/arm/mach-zynq/common.c
2041
2819
/* * This file contains common code that is intended to be used across * boards so that it's not replicated. * * Copyright (C) 2011 Xilinx * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/cpumask.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/clk/zynq.h> #include <linux/clocksource.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/of.h> #include <linux/irqchip.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <asm/mach-types.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/smp_scu.h> #include <asm/hardware/cache-l2x0.h> #include "common.h" void __iomem *zynq_scu_base; static struct of_device_id zynq_of_bus_ids[] __initdata = { { .compatible = "simple-bus", }, {} }; /** * zynq_init_machine - System specific initialization, intended to be * called from board specific initialization. */ static void __init zynq_init_machine(void) { /* * 64KB way size, 8-way associativity, parity disabled */ l2x0_of_init(0x02060000, 0xF0F0FFFF); of_platform_bus_probe(NULL, zynq_of_bus_ids, NULL); } static void __init zynq_timer_init(void) { zynq_slcr_init(); clocksource_of_init(); } static struct map_desc zynq_cortex_a9_scu_map __initdata = { .length = SZ_256, .type = MT_DEVICE, }; static void __init zynq_scu_map_io(void) { unsigned long base; base = scu_a9_get_base(); zynq_cortex_a9_scu_map.pfn = __phys_to_pfn(base); /* Expected address is in vmalloc area that's why simple assign here */ zynq_cortex_a9_scu_map.virtual = base; iotable_init(&zynq_cortex_a9_scu_map, 1); zynq_scu_base = (void __iomem *)base; BUG_ON(!zynq_scu_base); } /** * zynq_map_io - Create memory mappings needed for early I/O. */ static void __init zynq_map_io(void) { debug_ll_io_init(); zynq_scu_map_io(); } static void zynq_system_reset(char mode, const char *cmd) { zynq_slcr_system_reset(); } static const char * const zynq_dt_match[] = { "xlnx,zynq-zc702", "xlnx,zynq-7000", NULL }; MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform") .smp = smp_ops(zynq_smp_ops), .map_io = zynq_map_io, .init_irq = irqchip_init, .init_machine = zynq_init_machine, .init_time = zynq_timer_init, .dt_compat = zynq_dt_match, .restart = zynq_system_reset, MACHINE_END
gpl-2.0
GalaxyTab4/android_kernel_samsung_degas
net/netfilter/nf_conntrack_acct.c
2297
3071
/* Accouting handling for netfilter. */ /* * (C) 2008 Krzysztof Piotr Oledzki <ole@ans.pl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netfilter.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/export.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_acct.h> static bool nf_ct_acct __read_mostly; module_param_named(acct, nf_ct_acct, bool, 0644); MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting."); #ifdef CONFIG_SYSCTL static struct ctl_table acct_sysctl_table[] = { { .procname = "nf_conntrack_acct", .data = &init_net.ct.sysctl_acct, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, {} }; #endif /* CONFIG_SYSCTL */ unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) { struct nf_conn_counter *acct; acct = nf_conn_acct_find(ct); if (!acct) return 0; return seq_printf(s, "packets=%llu bytes=%llu ", (unsigned long long)atomic64_read(&acct[dir].packets), (unsigned long long)atomic64_read(&acct[dir].bytes)); }; EXPORT_SYMBOL_GPL(seq_print_acct); static struct nf_ct_ext_type acct_extend __read_mostly = { .len = sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]), .align = __alignof__(struct nf_conn_counter[IP_CT_DIR_MAX]), .id = NF_CT_EXT_ACCT, }; #ifdef CONFIG_SYSCTL static int nf_conntrack_acct_init_sysctl(struct net *net) { struct ctl_table *table; table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table), GFP_KERNEL); if (!table) goto out; table[0].data = &net->ct.sysctl_acct; /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) table[0].procname = NULL; net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter", table); if (!net->ct.acct_sysctl_header) { printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n"); goto out_register; } return 0; out_register: kfree(table); out: return -ENOMEM; } static void nf_conntrack_acct_fini_sysctl(struct net *net) { struct ctl_table *table; table = net->ct.acct_sysctl_header->ctl_table_arg; unregister_net_sysctl_table(net->ct.acct_sysctl_header); kfree(table); } #else static int nf_conntrack_acct_init_sysctl(struct net *net) { return 0; } static void nf_conntrack_acct_fini_sysctl(struct net *net) { } #endif int nf_conntrack_acct_pernet_init(struct net *net) { net->ct.sysctl_acct = nf_ct_acct; return nf_conntrack_acct_init_sysctl(net); } void nf_conntrack_acct_pernet_fini(struct net *net) { nf_conntrack_acct_fini_sysctl(net); } int nf_conntrack_acct_init(void) { int ret = nf_ct_extend_register(&acct_extend); if (ret < 0) pr_err("nf_conntrack_acct: Unable to register extension\n"); return ret; } void nf_conntrack_acct_fini(void) { nf_ct_extend_unregister(&acct_extend); }
gpl-2.0
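When the table above is registered under "net/netfilter" with procname "nf_conntrack_acct", the accounting switch appears in procfs. A minimal sketch of reading and enabling it from userspace; the /proc/sys path follows from the registration in the code, but treat it as an assumption about the running kernel, and writing it needs root.

/* Read and enable the per-netns flow accounting switch registered above. */
#include <stdio.h>

#define ACCT_SYSCTL "/proc/sys/net/netfilter/nf_conntrack_acct"

int main(void)
{
	FILE *f = fopen(ACCT_SYSCTL, "r+");
	int val;

	if (!f) {
		perror(ACCT_SYSCTL);
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("nf_conntrack_acct is currently %d\n", val);
	rewind(f);
	fprintf(f, "1\n");	/* enable accounting (requires root) */
	fclose(f);
	return 0;
}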
dwengen/linux
arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
4089
3188
/* * arch/arm/mach-mv78xx0/buffalo-wxl-setup.c * * Buffalo WXL (Terastation Duo) Setup routines * * sebastien requiem <sebastien@requiem.fr> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/ethtool.h> #include <linux/i2c.h> #include <mach/mv78xx0.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "common.h" #include "mpp.h" /* This arch has 2 Giga Ethernet */ static struct mv643xx_eth_platform_data db78x00_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(0), }; static struct mv643xx_eth_platform_data db78x00_ge01_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; /* 2 SATA controller supporting HotPlug */ static struct mv_sata_platform_data db78x00_sata_data = { .n_ports = 2, }; static struct i2c_board_info __initdata db78x00_i2c_rtc = { I2C_BOARD_INFO("ds1338", 0x68), }; static unsigned int wxl_mpp_config[] __initdata = { MPP0_GE1_TXCLK, MPP1_GE1_TXCTL, MPP2_GE1_RXCTL, MPP3_GE1_RXCLK, MPP4_GE1_TXD0, MPP5_GE1_TXD1, MPP6_GE1_TXD2, MPP7_GE1_TXD3, MPP8_GE1_RXD0, MPP9_GE1_RXD1, MPP10_GE1_RXD2, MPP11_GE1_RXD3, MPP12_GPIO, MPP13_SYSRST_OUTn, MPP14_SATA1_ACTn, MPP15_SATA0_ACTn, MPP16_GPIO, MPP17_GPIO, MPP18_GPIO, MPP19_GPIO, MPP20_GPIO, MPP21_GPIO, MPP22_GPIO, MPP23_GPIO, MPP24_UA2_TXD, MPP25_UA2_RXD, MPP26_UA2_CTSn, MPP27_UA2_RTSn, MPP28_GPIO, MPP29_SYSRST_OUTn, MPP30_GPIO, MPP31_GPIO, MPP32_GPIO, MPP33_GPIO, MPP34_GPIO, MPP35_GPIO, MPP36_GPIO, MPP37_GPIO, MPP38_GPIO, MPP39_GPIO, MPP40_UNUSED, MPP41_UNUSED, MPP42_UNUSED, MPP43_UNUSED, MPP44_UNUSED, MPP45_UNUSED, MPP46_UNUSED, MPP47_UNUSED, MPP48_SATA1_ACTn, MPP49_SATA0_ACTn, 0 }; static void __init wxl_init(void) { /* * Basic MV78xx0 setup. Needs to be called early. */ mv78xx0_init(); mv78xx0_mpp_conf(wxl_mpp_config); /* * Partition on-chip peripherals between the two CPU cores. */ mv78xx0_ehci0_init(); mv78xx0_ehci1_init(); mv78xx0_ehci2_init(); mv78xx0_ge00_init(&db78x00_ge00_data); mv78xx0_ge01_init(&db78x00_ge01_data); mv78xx0_sata_init(&db78x00_sata_data); mv78xx0_uart0_init(); mv78xx0_uart1_init(); mv78xx0_uart2_init(); mv78xx0_uart3_init(); mv78xx0_i2c_init(); i2c_register_board_info(0, &db78x00_i2c_rtc, 1); } static int __init wxl_pci_init(void) { if (machine_is_terastation_wxl()) { /* * Assign the x16 PCIe slot on the board to CPU core * #0, and let CPU core #1 have the four x1 slots. */ if (mv78xx0_core_index() == 0) mv78xx0_pcie_init(0, 1); else mv78xx0_pcie_init(1, 0); } return 0; } subsys_initcall(wxl_pci_init); MACHINE_START(TERASTATION_WXL, "Buffalo Nas WXL") /* Maintainer: Sebastien Requiem <sebastien@requiem.fr> */ .atag_offset = 0x100, .init_machine = wxl_init, .map_io = mv78xx0_map_io, .init_early = mv78xx0_init_early, .init_irq = mv78xx0_init_irq, .init_time = mv78xx0_timer_init, .restart = mv78xx0_restart, MACHINE_END
gpl-2.0
ghbhaha/AK-OnePone
drivers/pci/hotplug/pciehp_pci.c
4857
4188
/* * PCI Express Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include "../pci.h" #include "pciehp.h" static int __ref pciehp_add_bridge(struct pci_dev *dev) { struct pci_bus *parent = dev->bus; int pass, busnr, start = parent->secondary; int end = parent->subordinate; for (busnr = start; busnr <= end; busnr++) { if (!pci_find_bus(pci_domain_nr(parent), busnr)) break; } if (busnr-- > end) { err("No bus number available for hot-added bridge %s\n", pci_name(dev)); return -1; } for (pass = 0; pass < 2; pass++) busnr = pci_scan_bridge(parent, dev, busnr, pass); if (!dev->subordinate) return -1; return 0; } int pciehp_configure_device(struct slot *p_slot) { struct pci_dev *dev; struct pci_dev *bridge = p_slot->ctrl->pcie->port; struct pci_bus *parent = bridge->subordinate; int num, fn; struct controller *ctrl = p_slot->ctrl; dev = pci_get_slot(parent, PCI_DEVFN(0, 0)); if (dev) { ctrl_err(ctrl, "Device %s already exists " "at %04x:%02x:00, cannot hot-add\n", pci_name(dev), pci_domain_nr(parent), parent->number); pci_dev_put(dev); return -EINVAL; } num = pci_scan_slot(parent, PCI_DEVFN(0, 0)); if (num == 0) { ctrl_err(ctrl, "No new device found\n"); return -ENODEV; } for (fn = 0; fn < 8; fn++) { dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); if (!dev) continue; if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) { pciehp_add_bridge(dev); } pci_dev_put(dev); } pci_assign_unassigned_bridge_resources(bridge); for (fn = 0; fn < 8; fn++) { dev = pci_get_slot(parent, PCI_DEVFN(0, fn)); if (!dev) continue; if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { pci_dev_put(dev); continue; } pci_configure_slot(dev); pci_dev_put(dev); } pci_bus_add_devices(parent); return 0; } int pciehp_unconfigure_device(struct slot *p_slot) { int ret, rc = 0; int j; u8 bctl = 0; u8 presence = 0; struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate; u16 command; struct controller *ctrl = p_slot->ctrl; ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", __func__, pci_domain_nr(parent), parent->number); ret = pciehp_get_adapter_status(p_slot, &presence); if (ret) presence = 0; for (j = 0; j < 8; j++) { struct pci_dev *temp = pci_get_slot(parent, PCI_DEVFN(0, j)); if (!temp) continue; if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); if (bctl & PCI_BRIDGE_CTL_VGA) { ctrl_err(ctrl, "Cannot remove display device %s\n", pci_name(temp)); 
pci_dev_put(temp); rc = -EINVAL; break; } } pci_stop_and_remove_bus_device(temp); /* * Ensure that no new Requests will be generated from * the device. */ if (presence) { pci_read_config_word(temp, PCI_COMMAND, &command); command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR); command |= PCI_COMMAND_INTX_DISABLE; pci_write_config_word(temp, PCI_COMMAND, command); } pci_dev_put(temp); } return rc; }
gpl-2.0
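pciehp_configure_device() and pciehp_unconfigure_device() above probe all eight possible functions of device 0 in the slot with PCI_DEVFN(0, fn). A tiny sketch of how that devfn byte is packed; the macro bodies match the standard definitions and are reproduced here only so the example is self-contained.

/* Pack/unpack a PCI devfn the way the hotplug loops above iterate it. */
#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	int fn;

	/* same walk as pciehp_configure_device(): device 0, functions 0..7 */
	for (fn = 0; fn < 8; fn++) {
		unsigned int devfn = PCI_DEVFN(0, fn);

		printf("devfn=0x%02x -> slot %u func %u\n",
		       devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));
	}
	return 0;
}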
android-armv7a-belalang-tempur/Android_SpeedKernel_3.4
drivers/hwmon/smsc47m192.c
4857
21295
/* * smsc47m192.c - Support for hardware monitoring block of * SMSC LPC47M192 and compatible Super I/O chips * * Copyright (C) 2006 Hartmut Rick <linux@rick.claranet.de> * * Derived from lm78.c and other chip drivers. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/sysfs.h> #include <linux/mutex.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; /* SMSC47M192 registers */ #define SMSC47M192_REG_IN(nr) ((nr) < 6 ? (0x20 + (nr)) : \ (0x50 + (nr) - 6)) #define SMSC47M192_REG_IN_MAX(nr) ((nr) < 6 ? (0x2b + (nr) * 2) : \ (0x54 + (((nr) - 6) * 2))) #define SMSC47M192_REG_IN_MIN(nr) ((nr) < 6 ? (0x2c + (nr) * 2) : \ (0x55 + (((nr) - 6) * 2))) static u8 SMSC47M192_REG_TEMP[3] = { 0x27, 0x26, 0x52 }; static u8 SMSC47M192_REG_TEMP_MAX[3] = { 0x39, 0x37, 0x58 }; static u8 SMSC47M192_REG_TEMP_MIN[3] = { 0x3A, 0x38, 0x59 }; #define SMSC47M192_REG_TEMP_OFFSET(nr) ((nr) == 2 ? 
0x1e : 0x1f) #define SMSC47M192_REG_ALARM1 0x41 #define SMSC47M192_REG_ALARM2 0x42 #define SMSC47M192_REG_VID 0x47 #define SMSC47M192_REG_VID4 0x49 #define SMSC47M192_REG_CONFIG 0x40 #define SMSC47M192_REG_SFR 0x4f #define SMSC47M192_REG_COMPANY_ID 0x3e #define SMSC47M192_REG_VERSION 0x3f /* generalised scaling with integer rounding */ static inline int SCALE(long val, int mul, int div) { if (val < 0) return (val * mul - div / 2) / div; else return (val * mul + div / 2) / div; } /* Conversions */ /* smsc47m192 internally scales voltage measurements */ static const u16 nom_mv[] = { 2500, 2250, 3300, 5000, 12000, 3300, 1500, 1800 }; static inline unsigned int IN_FROM_REG(u8 reg, int n) { return SCALE(reg, nom_mv[n], 192); } static inline u8 IN_TO_REG(unsigned long val, int n) { return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255); } /* * TEMP: 0.001 degC units (-128C to +127C) * REG: 1C/bit, two's complement */ static inline s8 TEMP_TO_REG(int val) { return SENSORS_LIMIT(SCALE(val, 1, 1000), -128000, 127000); } static inline int TEMP_FROM_REG(s8 val) { return val * 1000; } struct smsc47m192_data { struct device *hwmon_dev; struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u8 in[8]; /* Register value */ u8 in_max[8]; /* Register value */ u8 in_min[8]; /* Register value */ s8 temp[3]; /* Register value */ s8 temp_max[3]; /* Register value */ s8 temp_min[3]; /* Register value */ s8 temp_offset[3]; /* Register value */ u16 alarms; /* Register encoding, combined */ u8 vid; /* Register encoding, combined */ u8 vrm; }; static int smsc47m192_probe(struct i2c_client *client, const struct i2c_device_id *id); static int smsc47m192_detect(struct i2c_client *client, struct i2c_board_info *info); static int smsc47m192_remove(struct i2c_client *client); static struct smsc47m192_data *smsc47m192_update_device(struct device *dev); static const struct i2c_device_id smsc47m192_id[] = { { "smsc47m192", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, smsc47m192_id); static struct i2c_driver smsc47m192_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "smsc47m192", }, .probe = smsc47m192_probe, .remove = smsc47m192_remove, .id_table = smsc47m192_id, .detect = smsc47m192_detect, .address_list = normal_i2c, }; /* Voltages */ static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr], nr)); } static ssize_t show_in_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr], nr)); } static ssize_t show_in_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr], nr)); } static ssize_t set_in_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = 
i2c_get_clientdata(client); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_min[nr] = IN_TO_REG(val, nr); i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MIN(nr), data->in_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_in_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->in_max[nr] = IN_TO_REG(val, nr); i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MAX(nr), data->in_max[nr]); mutex_unlock(&data->update_lock); return count; } #define show_in_offset(offset) \ static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \ show_in, NULL, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \ show_in_min, set_in_min, offset); \ static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \ show_in_max, set_in_max, offset); show_in_offset(0) show_in_offset(1) show_in_offset(2) show_in_offset(3) show_in_offset(4) show_in_offset(5) show_in_offset(6) show_in_offset(7) /* Temperatures */ static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr])); } static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr])); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr])); } static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_min[nr] = TEMP_TO_REG(val); i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MIN[nr], data->temp_min[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_max[nr] = TEMP_TO_REG(val); i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MAX[nr], data->temp_max[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t 
show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr])); } static ssize_t set_temp_offset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR); long val; int err; err = kstrtol(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); data->temp_offset[nr] = TEMP_TO_REG(val); if (nr > 1) i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_OFFSET(nr), data->temp_offset[nr]); else if (data->temp_offset[nr] != 0) { /* * offset[0] and offset[1] share the same register, * SFR bit 4 activates offset[0] */ i2c_smbus_write_byte_data(client, SMSC47M192_REG_SFR, (sfr & 0xef) | (nr == 0 ? 0x10 : 0)); data->temp_offset[1-nr] = 0; i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_OFFSET(nr), data->temp_offset[nr]); } else if ((sfr & 0x10) == (nr == 0 ? 0x10 : 0)) i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_OFFSET(nr), 0); mutex_unlock(&data->update_lock); return count; } #define show_temp_index(index) \ static SENSOR_DEVICE_ATTR(temp##index##_input, S_IRUGO, \ show_temp, NULL, index-1); \ static SENSOR_DEVICE_ATTR(temp##index##_min, S_IRUGO | S_IWUSR, \ show_temp_min, set_temp_min, index-1); \ static SENSOR_DEVICE_ATTR(temp##index##_max, S_IRUGO | S_IWUSR, \ show_temp_max, set_temp_max, index-1); \ static SENSOR_DEVICE_ATTR(temp##index##_offset, S_IRUGO | S_IWUSR, \ show_temp_offset, set_temp_offset, index-1); show_temp_index(1) show_temp_index(2) show_temp_index(3) /* VID */ static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf) { struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL); static ssize_t show_vrm(struct device *dev, struct device_attribute *attr, char *buf) { struct smsc47m192_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", data->vrm); } static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct smsc47m192_data *data = dev_get_drvdata(dev); unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; data->vrm = val; return count; } static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm); /* Alarms */ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; struct smsc47m192_data *data = smsc47m192_update_device(dev); return sprintf(buf, "%u\n", (data->alarms & nr) ? 
1 : 0); } static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0x0010); static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0x0020); static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0x0040); static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 0x4000); static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 0x8000); static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0x0001); static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0x0002); static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 0x0004); static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 0x0008); static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 0x0100); static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 0x0200); static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 0x0400); static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 0x0800); static struct attribute *smsc47m192_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in0_min.dev_attr.attr, &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_alarm.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_alarm.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_alarm.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_alarm.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in5_alarm.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in6_alarm.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in7_min.dev_attr.attr, &sensor_dev_attr_in7_max.dev_attr.attr, &sensor_dev_attr_in7_alarm.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_offset.dev_attr.attr, &sensor_dev_attr_temp1_alarm.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp2_offset.dev_attr.attr, &sensor_dev_attr_temp2_alarm.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp3_offset.dev_attr.attr, &sensor_dev_attr_temp3_alarm.dev_attr.attr, &sensor_dev_attr_temp3_fault.dev_attr.attr, &dev_attr_cpu0_vid.attr, &dev_attr_vrm.attr, NULL }; static const struct attribute_group smsc47m192_group = { .attrs = smsc47m192_attributes, }; static struct attribute *smsc47m192_attributes_in4[] = { &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_alarm.dev_attr.attr, NULL }; static const struct attribute_group smsc47m192_group_in4 = { .attrs = smsc47m192_attributes_in4, }; static void smsc47m192_init_client(struct i2c_client *client) { int i; u8 config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG); u8 sfr = 
i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR); /* select cycle mode (pause 1 sec between updates) */ i2c_smbus_write_byte_data(client, SMSC47M192_REG_SFR, (sfr & 0xfd) | 0x02); if (!(config & 0x01)) { /* initialize alarm limits */ for (i = 0; i < 8; i++) { i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MIN(i), 0); i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MAX(i), 0xff); } for (i = 0; i < 3; i++) { i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MIN[i], 0x80); i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MAX[i], 0x7f); } /* start monitoring */ i2c_smbus_write_byte_data(client, SMSC47M192_REG_CONFIG, (config & 0xf7) | 0x01); } } /* Return 0 if detection is successful, -ENODEV otherwise */ static int smsc47m192_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; int version; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* Detection criteria from sensors_detect script */ version = i2c_smbus_read_byte_data(client, SMSC47M192_REG_VERSION); if (i2c_smbus_read_byte_data(client, SMSC47M192_REG_COMPANY_ID) == 0x55 && (version & 0xf0) == 0x20 && (i2c_smbus_read_byte_data(client, SMSC47M192_REG_VID) & 0x70) == 0x00 && (i2c_smbus_read_byte_data(client, SMSC47M192_REG_VID4) & 0xfe) == 0x80) { dev_info(&adapter->dev, "found SMSC47M192 or compatible, " "version 2, stepping A%d\n", version & 0x0f); } else { dev_dbg(&adapter->dev, "SMSC47M192 detection failed at 0x%02x\n", client->addr); return -ENODEV; } strlcpy(info->type, "smsc47m192", I2C_NAME_SIZE); return 0; } static int smsc47m192_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct smsc47m192_data *data; int config; int err; data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(client, data); data->vrm = vid_which_vrm(); mutex_init(&data->update_lock); /* Initialize the SMSC47M192 chip */ smsc47m192_init_client(client); /* Register sysfs hooks */ err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group); if (err) goto exit_free; /* Pin 110 is either in4 (+12V) or VID4 */ config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG); if (!(config & 0x20)) { err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group_in4); if (err) goto exit_remove_files; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: sysfs_remove_group(&client->dev.kobj, &smsc47m192_group); sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4); exit_free: kfree(data); exit: return err; } static int smsc47m192_remove(struct i2c_client *client) { struct smsc47m192_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &smsc47m192_group); sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4); kfree(data); return 0; } static struct smsc47m192_data *smsc47m192_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct smsc47m192_data *data = i2c_get_clientdata(client); int i, config; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ + HZ / 2) || !data->valid) { u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR); dev_dbg(&client->dev, "Starting smsc47m192 update\n"); for (i = 0; i <= 7; i++) { data->in[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_IN(i)); data->in_min[i] = 
i2c_smbus_read_byte_data(client, SMSC47M192_REG_IN_MIN(i)); data->in_max[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_IN_MAX(i)); } for (i = 0; i < 3; i++) { data->temp[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_TEMP[i]); data->temp_max[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_TEMP_MAX[i]); data->temp_min[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_TEMP_MIN[i]); } for (i = 1; i < 3; i++) data->temp_offset[i] = i2c_smbus_read_byte_data(client, SMSC47M192_REG_TEMP_OFFSET(i)); /* * first offset is temp_offset[0] if SFR bit 4 is set, * temp_offset[1] otherwise */ if (sfr & 0x10) { data->temp_offset[0] = data->temp_offset[1]; data->temp_offset[1] = 0; } else data->temp_offset[0] = 0; data->vid = i2c_smbus_read_byte_data(client, SMSC47M192_REG_VID) & 0x0f; config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG); if (config & 0x20) data->vid |= (i2c_smbus_read_byte_data(client, SMSC47M192_REG_VID4) & 0x01) << 4; data->alarms = i2c_smbus_read_byte_data(client, SMSC47M192_REG_ALARM1) | (i2c_smbus_read_byte_data(client, SMSC47M192_REG_ALARM2) << 8); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } module_i2c_driver(smsc47m192_driver); MODULE_AUTHOR("Hartmut Rick <linux@rick.claranet.de>"); MODULE_DESCRIPTION("SMSC47M192 driver"); MODULE_LICENSE("GPL");
gpl-2.0
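The voltage conversions in smsc47m192.c above hinge on the SCALE() rounding helper and the per-channel nominal voltages. A small standalone check of IN_FROM_REG()/IN_TO_REG(); the helpers are copied from the driver, with SENSORS_LIMIT re-expressed as an ordinary clamp so the sketch builds outside the kernel.

/* Round-trip the smsc47m192 voltage scaling in plain userspace C. */
#include <stdio.h>

/* generalised scaling with integer rounding, as in the driver */
static inline int SCALE(long val, int mul, int div)
{
	if (val < 0)
		return (val * mul - div / 2) / div;
	else
		return (val * mul + div / 2) / div;
}

/* nominal full-scale voltages (mV) for channels in0..in7 */
static const unsigned short nom_mv[] = {
	2500, 2250, 3300, 5000, 12000, 3300, 1500, 1800
};

static unsigned int IN_FROM_REG(unsigned char reg, int n)
{
	return SCALE(reg, nom_mv[n], 192);
}

static unsigned char IN_TO_REG(unsigned long val, int n)
{
	int reg = SCALE(val, 192, nom_mv[n]);

	return reg < 0 ? 0 : reg > 255 ? 255 : reg;	/* SENSORS_LIMIT(0, 255) */
}

int main(void)
{
	/* register value 192 should read back as the nominal voltage */
	printf("in2: reg 192 -> %u mV\n", IN_FROM_REG(192, 2));   /* 3300 */
	printf("in4: 12000 mV -> reg %u\n", IN_TO_REG(12000, 4)); /* 192  */
	return 0;
}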
sameerkhan07/furnace_kernel_motorola_falcon
drivers/hwmon/max6642.c
4857
9835
/* * Driver for +/-1 degree C, SMBus-Compatible Remote/Local Temperature Sensor * with Overtemperature Alarm * * Copyright (C) 2011 AppearTV AS * * Derived from: * * Based on the max1619 driver. * Copyright (C) 2003-2004 Alexey Fisher <fishor@mail.ru> * Jean Delvare <khali@linux-fr.org> * * The MAX6642 is a sensor chip made by Maxim. * It reports up to two temperatures (its own plus up to * one external one). Complete datasheet can be * obtained from Maxim's website at: * http://datasheets.maxim-ic.com/en/ds/MAX6642.pdf * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; /* * The MAX6642 registers */ #define MAX6642_REG_R_MAN_ID 0xFE #define MAX6642_REG_R_CONFIG 0x03 #define MAX6642_REG_W_CONFIG 0x09 #define MAX6642_REG_R_STATUS 0x02 #define MAX6642_REG_R_LOCAL_TEMP 0x00 #define MAX6642_REG_R_LOCAL_TEMPL 0x11 #define MAX6642_REG_R_LOCAL_HIGH 0x05 #define MAX6642_REG_W_LOCAL_HIGH 0x0B #define MAX6642_REG_R_REMOTE_TEMP 0x01 #define MAX6642_REG_R_REMOTE_TEMPL 0x10 #define MAX6642_REG_R_REMOTE_HIGH 0x07 #define MAX6642_REG_W_REMOTE_HIGH 0x0D /* * Conversions */ static int temp_from_reg10(int val) { return val * 250; } static int temp_from_reg(int val) { return val * 1000; } static int temp_to_reg(int val) { return val / 1000; } /* * Client data (each client gets its own) */ struct max6642_data { struct device *hwmon_dev; struct mutex update_lock; bool valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ /* registers values */ u16 temp_input[2]; /* local/remote */ u16 temp_high[2]; /* local/remote */ u8 alarms; }; /* * Real code */ static void max6642_init_client(struct i2c_client *client) { u8 config; struct max6642_data *data = i2c_get_clientdata(client); /* * Start the conversions. 
*/ config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG); if (config & 0x40) i2c_smbus_write_byte_data(client, MAX6642_REG_W_CONFIG, config & 0xBF); /* run */ data->temp_high[0] = i2c_smbus_read_byte_data(client, MAX6642_REG_R_LOCAL_HIGH); data->temp_high[1] = i2c_smbus_read_byte_data(client, MAX6642_REG_R_REMOTE_HIGH); } /* Return 0 if detection is successful, -ENODEV otherwise */ static int max6642_detect(struct i2c_client *client, struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; u8 reg_config, reg_status, man_id; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; /* identification */ man_id = i2c_smbus_read_byte_data(client, MAX6642_REG_R_MAN_ID); if (man_id != 0x4D) return -ENODEV; /* sanity check */ if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D || i2c_smbus_read_byte_data(client, 0x06) != 0x4D || i2c_smbus_read_byte_data(client, 0xff) != 0x4D) return -ENODEV; /* * We read the config and status register, the 4 lower bits in the * config register should be zero and bit 5, 3, 1 and 0 should be * zero in the status register. */ reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG); if ((reg_config & 0x0f) != 0x00) return -ENODEV; /* in between, another round of sanity checks */ if (i2c_smbus_read_byte_data(client, 0x04) != reg_config || i2c_smbus_read_byte_data(client, 0x06) != reg_config || i2c_smbus_read_byte_data(client, 0xff) != reg_config) return -ENODEV; reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS); if ((reg_status & 0x2b) != 0x00) return -ENODEV; strlcpy(info->type, "max6642", I2C_NAME_SIZE); return 0; } static struct max6642_data *max6642_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct max6642_data *data = i2c_get_clientdata(client); u16 val, tmp; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { dev_dbg(&client->dev, "Updating max6642 data.\n"); val = i2c_smbus_read_byte_data(client, MAX6642_REG_R_LOCAL_TEMPL); tmp = (val >> 6) & 3; val = i2c_smbus_read_byte_data(client, MAX6642_REG_R_LOCAL_TEMP); val = (val << 2) | tmp; data->temp_input[0] = val; val = i2c_smbus_read_byte_data(client, MAX6642_REG_R_REMOTE_TEMPL); tmp = (val >> 6) & 3; val = i2c_smbus_read_byte_data(client, MAX6642_REG_R_REMOTE_TEMP); val = (val << 2) | tmp; data->temp_input[1] = val; data->alarms = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* * Sysfs stuff */ static ssize_t show_temp_max10(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct max6642_data *data = max6642_update_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); return sprintf(buf, "%d\n", temp_from_reg10(data->temp_input[attr->index])); } static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr, char *buf) { struct max6642_data *data = max6642_update_device(dev); struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr); return sprintf(buf, "%d\n", temp_from_reg(data->temp_high[attr2->nr])); } static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; int err; struct i2c_client *client = to_i2c_client(dev); struct max6642_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr); err = kstrtoul(buf, 10, &val); if (err < 0) return 
err; mutex_lock(&data->update_lock); data->temp_high[attr2->nr] = SENSORS_LIMIT(temp_to_reg(val), 0, 255); i2c_smbus_write_byte_data(client, attr2->index, data->temp_high[attr2->nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) { int bitnr = to_sensor_dev_attr(attr)->index; struct max6642_data *data = max6642_update_device(dev); return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1); } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_max10, NULL, 0); static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_max10, NULL, 1); static SENSOR_DEVICE_ATTR_2(temp1_max, S_IWUSR | S_IRUGO, show_temp_max, set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH); static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max, set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH); static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2); static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6); static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4); static struct attribute *max6642_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp2_fault.dev_attr.attr, &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, NULL }; static const struct attribute_group max6642_group = { .attrs = max6642_attributes, }; static int max6642_probe(struct i2c_client *new_client, const struct i2c_device_id *id) { struct max6642_data *data; int err; data = kzalloc(sizeof(struct max6642_data), GFP_KERNEL); if (!data) { err = -ENOMEM; goto exit; } i2c_set_clientdata(new_client, data); mutex_init(&data->update_lock); /* Initialize the MAX6642 chip */ max6642_init_client(new_client); /* Register sysfs hooks */ err = sysfs_create_group(&new_client->dev.kobj, &max6642_group); if (err) goto exit_free; data->hwmon_dev = hwmon_device_register(&new_client->dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove_files; } return 0; exit_remove_files: sysfs_remove_group(&new_client->dev.kobj, &max6642_group); exit_free: kfree(data); exit: return err; } static int max6642_remove(struct i2c_client *client) { struct max6642_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &max6642_group); kfree(data); return 0; } /* * Driver data (common to all clients) */ static const struct i2c_device_id max6642_id[] = { { "max6642", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max6642_id); static struct i2c_driver max6642_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "max6642", }, .probe = max6642_probe, .remove = max6642_remove, .id_table = max6642_id, .detect = max6642_detect, .address_list = normal_i2c, }; module_i2c_driver(max6642_driver); MODULE_AUTHOR("Per Dalen <per.dalen@appeartv.com>"); MODULE_DESCRIPTION("MAX6642 sensor driver"); MODULE_LICENSE("GPL");
gpl-2.0
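The max6642.c entry above rebuilds a 10-bit temperature by shifting the whole-degree register left by two, OR-ing in the top two bits of the extended register, and scaling by 250 to get millidegrees (temp_from_reg10()). Below is a minimal user-space sketch of just that arithmetic; max6642_temp_millideg(), reg_msb/reg_lsb and the main() harness are illustrative stand-ins for raw i2c_smbus_read_byte_data() results, not part of the driver.

#include <stdio.h>

/* Combine the coarse register (whole degrees) with the top two bits of the
 * extended register (0.25 degree steps) and scale to millidegrees, mirroring
 * temp_from_reg10(). reg_msb/reg_lsb stand in for raw SMBus reads. */
static int max6642_temp_millideg(unsigned char reg_msb, unsigned char reg_lsb)
{
    unsigned int raw = ((unsigned int)reg_msb << 2) | ((reg_lsb >> 6) & 0x3);
    return (int)(raw * 250);
}

int main(void)
{
    /* 0x19 = 25 whole degrees, extended bits 0b10 = +0.50 C -> prints 25500 */
    printf("%d\n", max6642_temp_millideg(0x19, 0x80));
    return 0;
}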
NoelMacwan/android_kernel_sony_apq8064
drivers/i2c/busses/i2c-stu300.c
5113
27425
/* * Copyright (C) 2007-2009 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * ST DDC I2C master mode driver, used in e.g. U300 series platforms. * Author: Linus Walleij <linus.walleij@stericsson.com> * Author: Jonas Aaberg <jonas.aberg@stericsson.com> */ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> /* the name of this kernel module */ #define NAME "stu300" /* CR (Control Register) 8bit (R/W) */ #define I2C_CR (0x00000000) #define I2C_CR_RESET_VALUE (0x00) #define I2C_CR_RESET_UMASK (0x00) #define I2C_CR_DDC1_ENABLE (0x80) #define I2C_CR_TRANS_ENABLE (0x40) #define I2C_CR_PERIPHERAL_ENABLE (0x20) #define I2C_CR_DDC2B_ENABLE (0x10) #define I2C_CR_START_ENABLE (0x08) #define I2C_CR_ACK_ENABLE (0x04) #define I2C_CR_STOP_ENABLE (0x02) #define I2C_CR_INTERRUPT_ENABLE (0x01) /* SR1 (Status Register 1) 8bit (R/-) */ #define I2C_SR1 (0x00000004) #define I2C_SR1_RESET_VALUE (0x00) #define I2C_SR1_RESET_UMASK (0x00) #define I2C_SR1_EVF_IND (0x80) #define I2C_SR1_ADD10_IND (0x40) #define I2C_SR1_TRA_IND (0x20) #define I2C_SR1_BUSY_IND (0x10) #define I2C_SR1_BTF_IND (0x08) #define I2C_SR1_ADSL_IND (0x04) #define I2C_SR1_MSL_IND (0x02) #define I2C_SR1_SB_IND (0x01) /* SR2 (Status Register 2) 8bit (R/-) */ #define I2C_SR2 (0x00000008) #define I2C_SR2_RESET_VALUE (0x00) #define I2C_SR2_RESET_UMASK (0x40) #define I2C_SR2_MASK (0xBF) #define I2C_SR2_SCLFAL_IND (0x80) #define I2C_SR2_ENDAD_IND (0x20) #define I2C_SR2_AF_IND (0x10) #define I2C_SR2_STOPF_IND (0x08) #define I2C_SR2_ARLO_IND (0x04) #define I2C_SR2_BERR_IND (0x02) #define I2C_SR2_DDC2BF_IND (0x01) /* CCR (Clock Control Register) 8bit (R/W) */ #define I2C_CCR (0x0000000C) #define I2C_CCR_RESET_VALUE (0x00) #define I2C_CCR_RESET_UMASK (0x00) #define I2C_CCR_MASK (0xFF) #define I2C_CCR_FMSM (0x80) #define I2C_CCR_CC_MASK (0x7F) /* OAR1 (Own Address Register 1) 8bit (R/W) */ #define I2C_OAR1 (0x00000010) #define I2C_OAR1_RESET_VALUE (0x00) #define I2C_OAR1_RESET_UMASK (0x00) #define I2C_OAR1_ADD_MASK (0xFF) /* OAR2 (Own Address Register 2) 8bit (R/W) */ #define I2C_OAR2 (0x00000014) #define I2C_OAR2_RESET_VALUE (0x40) #define I2C_OAR2_RESET_UMASK (0x19) #define I2C_OAR2_MASK (0xE6) #define I2C_OAR2_FR_25_10MHZ (0x00) #define I2C_OAR2_FR_10_1667MHZ (0x20) #define I2C_OAR2_FR_1667_2667MHZ (0x40) #define I2C_OAR2_FR_2667_40MHZ (0x60) #define I2C_OAR2_FR_40_5333MHZ (0x80) #define I2C_OAR2_FR_5333_66MHZ (0xA0) #define I2C_OAR2_FR_66_80MHZ (0xC0) #define I2C_OAR2_FR_80_100MHZ (0xE0) #define I2C_OAR2_FR_MASK (0xE0) #define I2C_OAR2_ADD_MASK (0x06) /* DR (Data Register) 8bit (R/W) */ #define I2C_DR (0x00000018) #define I2C_DR_RESET_VALUE (0x00) #define I2C_DR_RESET_UMASK (0xFF) #define I2C_DR_D_MASK (0xFF) /* ECCR (Extended Clock Control Register) 8bit (R/W) */ #define I2C_ECCR (0x0000001C) #define I2C_ECCR_RESET_VALUE (0x00) #define I2C_ECCR_RESET_UMASK (0xE0) #define I2C_ECCR_MASK (0x1F) #define I2C_ECCR_CC_MASK (0x1F) /* * These events are more or less responses to commands * sent into the hardware, presumably reflecting the state * of an internal state machine. 
*/ enum stu300_event { STU300_EVENT_NONE = 0, STU300_EVENT_1, STU300_EVENT_2, STU300_EVENT_3, STU300_EVENT_4, STU300_EVENT_5, STU300_EVENT_6, STU300_EVENT_7, STU300_EVENT_8, STU300_EVENT_9 }; enum stu300_error { STU300_ERROR_NONE = 0, STU300_ERROR_ACKNOWLEDGE_FAILURE, STU300_ERROR_BUS_ERROR, STU300_ERROR_ARBITRATION_LOST, STU300_ERROR_UNKNOWN }; /* timeout waiting for the controller to respond */ #define STU300_TIMEOUT (msecs_to_jiffies(1000)) /* * The number of address send athemps tried before giving up. * If the first one failes it seems like 5 to 8 attempts are required. */ #define NUM_ADDR_RESEND_ATTEMPTS 12 /* I2C clock speed, in Hz 0-400kHz*/ static unsigned int scl_frequency = 100000; module_param(scl_frequency, uint, 0644); /** * struct stu300_dev - the stu300 driver state holder * @pdev: parent platform device * @adapter: corresponding I2C adapter * @phybase: location of I/O area in memory * @physize: size of I/O area in memory * @clk: hardware block clock * @irq: assigned interrupt line * @cmd_issue_lock: this locks the following cmd_ variables * @cmd_complete: acknowledge completion for an I2C command * @cmd_event: expected event coming in as a response to a command * @cmd_err: error code as response to a command * @speed: current bus speed in Hz * @msg_index: index of current message * @msg_len: length of current message */ struct stu300_dev { struct platform_device *pdev; struct i2c_adapter adapter; resource_size_t phybase; resource_size_t physize; void __iomem *virtbase; struct clk *clk; int irq; spinlock_t cmd_issue_lock; struct completion cmd_complete; enum stu300_event cmd_event; enum stu300_error cmd_err; unsigned int speed; int msg_index; int msg_len; }; /* Local forward function declarations */ static int stu300_init_hw(struct stu300_dev *dev); /* * The block needs writes in both MSW and LSW in order * for all data lines to reach their destination. */ static inline void stu300_wr8(u32 value, void __iomem *address) { writel((value << 16) | value, address); } /* * This merely masks off the duplicates which appear * in bytes 1-3. You _MUST_ use 32-bit bus access on this * device, else it will not work. */ static inline u32 stu300_r8(void __iomem *address) { return readl(address) & 0x000000FFU; } static void stu300_irq_enable(struct stu300_dev *dev) { u32 val; val = stu300_r8(dev->virtbase + I2C_CR); val |= I2C_CR_INTERRUPT_ENABLE; /* Twice paranoia (possible HW glitch) */ stu300_wr8(val, dev->virtbase + I2C_CR); stu300_wr8(val, dev->virtbase + I2C_CR); } static void stu300_irq_disable(struct stu300_dev *dev) { u32 val; val = stu300_r8(dev->virtbase + I2C_CR); val &= ~I2C_CR_INTERRUPT_ENABLE; /* Twice paranoia (possible HW glitch) */ stu300_wr8(val, dev->virtbase + I2C_CR); stu300_wr8(val, dev->virtbase + I2C_CR); } /* * Tells whether a certain event or events occurred in * response to a command. The events represent states in * the internal state machine of the hardware. The events * are not very well described in the hardware * documentation and can only be treated as abstract state * machine states. * * @ret 0 = event has not occurred or unknown error, any * other value means the correct event occurred or an error. */ static int stu300_event_occurred(struct stu300_dev *dev, enum stu300_event mr_event) { u32 status1; u32 status2; /* What event happened? 
*/ status1 = stu300_r8(dev->virtbase + I2C_SR1); if (!(status1 & I2C_SR1_EVF_IND)) /* No event at all */ return 0; status2 = stu300_r8(dev->virtbase + I2C_SR2); /* Block any multiple interrupts */ stu300_irq_disable(dev); /* Check for errors first */ if (status2 & I2C_SR2_AF_IND) { dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE; return 1; } else if (status2 & I2C_SR2_BERR_IND) { dev->cmd_err = STU300_ERROR_BUS_ERROR; return 1; } else if (status2 & I2C_SR2_ARLO_IND) { dev->cmd_err = STU300_ERROR_ARBITRATION_LOST; return 1; } switch (mr_event) { case STU300_EVENT_1: if (status1 & I2C_SR1_ADSL_IND) return 1; break; case STU300_EVENT_2: case STU300_EVENT_3: case STU300_EVENT_7: case STU300_EVENT_8: if (status1 & I2C_SR1_BTF_IND) { return 1; } break; case STU300_EVENT_4: if (status2 & I2C_SR2_STOPF_IND) return 1; break; case STU300_EVENT_5: if (status1 & I2C_SR1_SB_IND) /* Clear start bit */ return 1; break; case STU300_EVENT_6: if (status2 & I2C_SR2_ENDAD_IND) { /* First check for any errors */ return 1; } break; case STU300_EVENT_9: if (status1 & I2C_SR1_ADD10_IND) return 1; break; default: break; } /* If we get here, we're on thin ice. * Here we are in a status where we have * gotten a response that does not match * what we requested. */ dev->cmd_err = STU300_ERROR_UNKNOWN; dev_err(&dev->pdev->dev, "Unhandled interrupt! %d sr1: 0x%x sr2: 0x%x\n", mr_event, status1, status2); return 0; } static irqreturn_t stu300_irh(int irq, void *data) { struct stu300_dev *dev = data; int res; /* Just make sure that the block is clocked */ clk_enable(dev->clk); /* See if this was what we were waiting for */ spin_lock(&dev->cmd_issue_lock); res = stu300_event_occurred(dev, dev->cmd_event); if (res || dev->cmd_err != STU300_ERROR_NONE) complete(&dev->cmd_complete); spin_unlock(&dev->cmd_issue_lock); clk_disable(dev->clk); return IRQ_HANDLED; } /* * Sends a command and then waits for the bits masked by *flagmask* * to go high or low by IRQ awaiting. */ static int stu300_start_and_await_event(struct stu300_dev *dev, u8 cr_value, enum stu300_event mr_event) { int ret; if (unlikely(irqs_disabled())) { /* TODO: implement polling for this case if need be. */ WARN(1, "irqs are disabled, cannot poll for event\n"); return -EIO; } /* Lock command issue, fill in an event we wait for */ spin_lock_irq(&dev->cmd_issue_lock); init_completion(&dev->cmd_complete); dev->cmd_err = STU300_ERROR_NONE; dev->cmd_event = mr_event; spin_unlock_irq(&dev->cmd_issue_lock); /* Turn on interrupt, send command and wait. */ cr_value |= I2C_CR_INTERRUPT_ENABLE; stu300_wr8(cr_value, dev->virtbase + I2C_CR); ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, STU300_TIMEOUT); if (ret < 0) { dev_err(&dev->pdev->dev, "wait_for_completion_interruptible_timeout() " "returned %d waiting for event %04x\n", ret, mr_event); return ret; } if (ret == 0) { dev_err(&dev->pdev->dev, "controller timed out " "waiting for event %d, reinit hardware\n", mr_event); (void) stu300_init_hw(dev); return -ETIMEDOUT; } if (dev->cmd_err != STU300_ERROR_NONE) { dev_err(&dev->pdev->dev, "controller (start) " "error %d waiting for event %d, reinit hardware\n", dev->cmd_err, mr_event); (void) stu300_init_hw(dev); return -EIO; } return 0; } /* * This waits for a flag to be set, if it is not set on entry, an interrupt is * configured to wait for the flag using a completion. */ static int stu300_await_event(struct stu300_dev *dev, enum stu300_event mr_event) { int ret; if (unlikely(irqs_disabled())) { /* TODO: implement polling for this case if need be. 
*/ dev_err(&dev->pdev->dev, "irqs are disabled on this " "system!\n"); return -EIO; } /* Is it already here? */ spin_lock_irq(&dev->cmd_issue_lock); dev->cmd_err = STU300_ERROR_NONE; dev->cmd_event = mr_event; init_completion(&dev->cmd_complete); /* Turn on the I2C interrupt for current operation */ stu300_irq_enable(dev); /* Unlock the command block and wait for the event to occur */ spin_unlock_irq(&dev->cmd_issue_lock); ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, STU300_TIMEOUT); if (ret < 0) { dev_err(&dev->pdev->dev, "wait_for_completion_interruptible_timeout()" "returned %d waiting for event %04x\n", ret, mr_event); return ret; } if (ret == 0) { if (mr_event != STU300_EVENT_6) { dev_err(&dev->pdev->dev, "controller " "timed out waiting for event %d, reinit " "hardware\n", mr_event); (void) stu300_init_hw(dev); } return -ETIMEDOUT; } if (dev->cmd_err != STU300_ERROR_NONE) { if (mr_event != STU300_EVENT_6) { dev_err(&dev->pdev->dev, "controller " "error (await_event) %d waiting for event %d, " "reinit hardware\n", dev->cmd_err, mr_event); (void) stu300_init_hw(dev); } return -EIO; } return 0; } /* * Waits for the busy bit to go low by repeated polling. */ #define BUSY_RELEASE_ATTEMPTS 10 static int stu300_wait_while_busy(struct stu300_dev *dev) { unsigned long timeout; int i; for (i = 0; i < BUSY_RELEASE_ATTEMPTS; i++) { timeout = jiffies + STU300_TIMEOUT; while (!time_after(jiffies, timeout)) { /* Is not busy? */ if ((stu300_r8(dev->virtbase + I2C_SR1) & I2C_SR1_BUSY_IND) == 0) return 0; msleep(1); } dev_err(&dev->pdev->dev, "transaction timed out " "waiting for device to be free (not busy). " "Attempt: %d\n", i+1); dev_err(&dev->pdev->dev, "base address = " "0x%08x, reinit hardware\n", (u32) dev->virtbase); (void) stu300_init_hw(dev); } dev_err(&dev->pdev->dev, "giving up after %d attempts " "to reset the bus.\n", BUSY_RELEASE_ATTEMPTS); return -ETIMEDOUT; } struct stu300_clkset { unsigned long rate; u32 setting; }; static const struct stu300_clkset stu300_clktable[] = { { 0, 0xFFU }, { 2500000, I2C_OAR2_FR_25_10MHZ }, { 10000000, I2C_OAR2_FR_10_1667MHZ }, { 16670000, I2C_OAR2_FR_1667_2667MHZ }, { 26670000, I2C_OAR2_FR_2667_40MHZ }, { 40000000, I2C_OAR2_FR_40_5333MHZ }, { 53330000, I2C_OAR2_FR_5333_66MHZ }, { 66000000, I2C_OAR2_FR_66_80MHZ }, { 80000000, I2C_OAR2_FR_80_100MHZ }, { 100000000, 0xFFU }, }; static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) { u32 val; int i = 0; /* Locate the appropriate clock setting */ while (i < ARRAY_SIZE(stu300_clktable) - 1 && stu300_clktable[i].rate < clkrate) i++; if (stu300_clktable[i].setting == 0xFFU) { dev_err(&dev->pdev->dev, "too %s clock rate requested " "(%lu Hz).\n", i ? "high" : "low", clkrate); return -EINVAL; } stu300_wr8(stu300_clktable[i].setting, dev->virtbase + I2C_OAR2); dev_dbg(&dev->pdev->dev, "Clock rate %lu Hz, I2C bus speed %d Hz " "virtbase %p\n", clkrate, dev->speed, dev->virtbase); if (dev->speed > 100000) /* Fast Mode I2C */ val = ((clkrate/dev->speed) - 9)/3 + 1; else /* Standard Mode I2C */ val = ((clkrate/dev->speed) - 7)/2 + 1; /* According to spec the divider must be > 2 */ if (val < 0x002) { dev_err(&dev->pdev->dev, "too low clock rate (%lu Hz).\n", clkrate); return -EINVAL; } /* We have 12 bits clock divider only! 
*/ if (val & 0xFFFFF000U) { dev_err(&dev->pdev->dev, "too high clock rate (%lu Hz).\n", clkrate); return -EINVAL; } if (dev->speed > 100000) { /* CC6..CC0 */ stu300_wr8((val & I2C_CCR_CC_MASK) | I2C_CCR_FMSM, dev->virtbase + I2C_CCR); dev_dbg(&dev->pdev->dev, "set clock divider to 0x%08x, " "Fast Mode I2C\n", val); } else { /* CC6..CC0 */ stu300_wr8((val & I2C_CCR_CC_MASK), dev->virtbase + I2C_CCR); dev_dbg(&dev->pdev->dev, "set clock divider to " "0x%08x, Standard Mode I2C\n", val); } /* CC11..CC7 */ stu300_wr8(((val >> 7) & 0x1F), dev->virtbase + I2C_ECCR); return 0; } static int stu300_init_hw(struct stu300_dev *dev) { u32 dummy; unsigned long clkrate; int ret; /* Disable controller */ stu300_wr8(0x00, dev->virtbase + I2C_CR); /* * Set own address to some default value (0x00). * We do not support slave mode anyway. */ stu300_wr8(0x00, dev->virtbase + I2C_OAR1); /* * The I2C controller only operates properly in 26 MHz but we * program this driver as if we didn't know. This will also set the two * high bits of the own address to zero as well. * There is no known hardware issue with running in 13 MHz * However, speeds over 200 kHz are not used. */ clkrate = clk_get_rate(dev->clk); ret = stu300_set_clk(dev, clkrate); if (ret) return ret; /* * Enable block, do it TWICE (hardware glitch) * Setting bit 7 can enable DDC mode. (Not used currently.) */ stu300_wr8(I2C_CR_PERIPHERAL_ENABLE, dev->virtbase + I2C_CR); stu300_wr8(I2C_CR_PERIPHERAL_ENABLE, dev->virtbase + I2C_CR); /* Make a dummy read of the status register SR1 & SR2 */ dummy = stu300_r8(dev->virtbase + I2C_SR2); dummy = stu300_r8(dev->virtbase + I2C_SR1); return 0; } /* Send slave address. */ static int stu300_send_address(struct stu300_dev *dev, struct i2c_msg *msg, int resend) { u32 val; int ret; if (msg->flags & I2C_M_TEN) /* This is probably how 10 bit addresses look */ val = (0xf0 | (((u32) msg->addr & 0x300) >> 7)) & I2C_DR_D_MASK; else val = ((msg->addr << 1) & I2C_DR_D_MASK); if (msg->flags & I2C_M_RD) { /* This is the direction bit */ val |= 0x01; if (resend) dev_dbg(&dev->pdev->dev, "read resend\n"); } else if (resend) dev_dbg(&dev->pdev->dev, "write resend\n"); stu300_wr8(val, dev->virtbase + I2C_DR); /* For 10bit addressing, await 10bit request (EVENT 9) */ if (msg->flags & I2C_M_TEN) { ret = stu300_await_event(dev, STU300_EVENT_9); /* * The slave device wants a 10bit address, send the rest * of the bits (the LSBits) */ val = msg->addr & I2C_DR_D_MASK; /* This clears "event 9" */ stu300_wr8(val, dev->virtbase + I2C_DR); if (ret != 0) return ret; } /* FIXME: Why no else here? two events for 10bit? * Await event 6 (normal) or event 9 (10bit) */ if (resend) dev_dbg(&dev->pdev->dev, "await event 6\n"); ret = stu300_await_event(dev, STU300_EVENT_6); /* * Clear any pending EVENT 6 no matter what happened during * await_event. */ val = stu300_r8(dev->virtbase + I2C_CR); val |= I2C_CR_PERIPHERAL_ENABLE; stu300_wr8(val, dev->virtbase + I2C_CR); return ret; } static int stu300_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { u32 cr; u32 val; u32 i; int ret; int attempts = 0; struct stu300_dev *dev = i2c_get_adapdata(adap); clk_enable(dev->clk); /* Remove this if (0) to trace each and every message. 
*/ if (0) { dev_dbg(&dev->pdev->dev, "I2C message to: 0x%04x, len: %d, " "flags: 0x%04x, stop: %d\n", msg->addr, msg->len, msg->flags, stop); } /* Zero-length messages are not supported by this hardware */ if (msg->len == 0) { ret = -EINVAL; goto exit_disable; } /* * For some reason, sending the address sometimes fails when running * on the 13 MHz clock. No interrupt arrives. This is a work around, * which tries to restart and send the address up to 10 times before * really giving up. Usually 5 to 8 attempts are enough. */ do { if (attempts) dev_dbg(&dev->pdev->dev, "wait while busy\n"); /* Check that the bus is free, or wait until some timeout */ ret = stu300_wait_while_busy(dev); if (ret != 0) goto exit_disable; if (attempts) dev_dbg(&dev->pdev->dev, "re-int hw\n"); /* * According to ST, there is no problem if the clock is * changed between 13 and 26 MHz during a transfer. */ ret = stu300_init_hw(dev); if (ret) goto exit_disable; /* Send a start condition */ cr = I2C_CR_PERIPHERAL_ENABLE; /* Setting the START bit puts the block in master mode */ if (!(msg->flags & I2C_M_NOSTART)) cr |= I2C_CR_START_ENABLE; if ((msg->flags & I2C_M_RD) && (msg->len > 1)) /* On read more than 1 byte, we need ack. */ cr |= I2C_CR_ACK_ENABLE; /* Check that it gets through */ if (!(msg->flags & I2C_M_NOSTART)) { if (attempts) dev_dbg(&dev->pdev->dev, "send start event\n"); ret = stu300_start_and_await_event(dev, cr, STU300_EVENT_5); } if (attempts) dev_dbg(&dev->pdev->dev, "send address\n"); if (ret == 0) /* Send address */ ret = stu300_send_address(dev, msg, attempts != 0); if (ret != 0) { attempts++; dev_dbg(&dev->pdev->dev, "failed sending address, " "retrying. Attempt: %d msg_index: %d/%d\n", attempts, dev->msg_index, dev->msg_len); } } while (ret != 0 && attempts < NUM_ADDR_RESEND_ATTEMPTS); if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) { dev_dbg(&dev->pdev->dev, "managed to get address " "through after %d attempts\n", attempts); } else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) { dev_dbg(&dev->pdev->dev, "I give up, tried %d times " "to resend address.\n", NUM_ADDR_RESEND_ATTEMPTS); goto exit_disable; } if (msg->flags & I2C_M_RD) { /* READ: we read the actual bytes one at a time */ for (i = 0; i < msg->len; i++) { if (i == msg->len-1) { /* * Disable ACK and set STOP condition before * reading last byte */ val = I2C_CR_PERIPHERAL_ENABLE; if (stop) val |= I2C_CR_STOP_ENABLE; stu300_wr8(val, dev->virtbase + I2C_CR); } /* Wait for this byte... 
*/ ret = stu300_await_event(dev, STU300_EVENT_7); if (ret != 0) goto exit_disable; /* This clears event 7 */ msg->buf[i] = (u8) stu300_r8(dev->virtbase + I2C_DR); } } else { /* WRITE: we send the actual bytes one at a time */ for (i = 0; i < msg->len; i++) { /* Write the byte */ stu300_wr8(msg->buf[i], dev->virtbase + I2C_DR); /* Check status */ ret = stu300_await_event(dev, STU300_EVENT_8); /* Next write to DR will clear event 8 */ if (ret != 0) { dev_err(&dev->pdev->dev, "error awaiting " "event 8 (%d)\n", ret); goto exit_disable; } } /* Check NAK */ if (!(msg->flags & I2C_M_IGNORE_NAK)) { if (stu300_r8(dev->virtbase + I2C_SR2) & I2C_SR2_AF_IND) { dev_err(&dev->pdev->dev, "I2C payload " "send returned NAK!\n"); ret = -EIO; goto exit_disable; } } if (stop) { /* Send stop condition */ val = I2C_CR_PERIPHERAL_ENABLE; val |= I2C_CR_STOP_ENABLE; stu300_wr8(val, dev->virtbase + I2C_CR); } } /* Check that the bus is free, or wait until some timeout occurs */ ret = stu300_wait_while_busy(dev); if (ret != 0) { dev_err(&dev->pdev->dev, "timout waiting for transfer " "to commence.\n"); goto exit_disable; } /* Dummy read status registers */ val = stu300_r8(dev->virtbase + I2C_SR2); val = stu300_r8(dev->virtbase + I2C_SR1); ret = 0; exit_disable: /* Disable controller */ stu300_wr8(0x00, dev->virtbase + I2C_CR); clk_disable(dev->clk); return ret; } static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { int ret = -1; int i; struct stu300_dev *dev = i2c_get_adapdata(adap); dev->msg_len = num; for (i = 0; i < num; i++) { /* * Another driver appears to send stop for each message, * here we only do that for the last message. Possibly some * peripherals require this behaviour, then their drivers * have to send single messages in order to get "stop" for * each message. */ dev->msg_index = i; ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1))); if (ret != 0) { num = ret; break; } } return num; } static u32 stu300_func(struct i2c_adapter *adap) { /* This is the simplest thing you can think of... 
*/ return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR; } static const struct i2c_algorithm stu300_algo = { .master_xfer = stu300_xfer, .functionality = stu300_func, }; static int __init stu300_probe(struct platform_device *pdev) { struct stu300_dev *dev; struct i2c_adapter *adap; struct resource *res; int bus_nr; int ret = 0; char clk_name[] = "I2C0"; dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL); if (!dev) { dev_err(&pdev->dev, "could not allocate device struct\n"); ret = -ENOMEM; goto err_no_devmem; } bus_nr = pdev->id; clk_name[3] += (char)bus_nr; dev->clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(dev->clk)) { ret = PTR_ERR(dev->clk); dev_err(&pdev->dev, "could not retrieve i2c bus clock\n"); goto err_no_clk; } dev->pdev = pdev; platform_set_drvdata(pdev, dev); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENOENT; goto err_no_resource; } dev->phybase = res->start; dev->physize = resource_size(res); if (request_mem_region(dev->phybase, dev->physize, NAME " I/O Area") == NULL) { ret = -EBUSY; goto err_no_ioregion; } dev->virtbase = ioremap(dev->phybase, dev->physize); dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual " "base %p\n", bus_nr, dev->virtbase); if (!dev->virtbase) { ret = -ENOMEM; goto err_no_ioremap; } dev->irq = platform_get_irq(pdev, 0); if (request_irq(dev->irq, stu300_irh, 0, NAME, dev)) { ret = -EIO; goto err_no_irq; } dev->speed = scl_frequency; clk_enable(dev->clk); ret = stu300_init_hw(dev); clk_disable(dev->clk); if (ret != 0) { dev_err(&dev->pdev->dev, "error initializing hardware.\n"); goto err_init_hw; } /* IRQ event handling initialization */ spin_lock_init(&dev->cmd_issue_lock); dev->cmd_event = STU300_EVENT_NONE; dev->cmd_err = STU300_ERROR_NONE; adap = &dev->adapter; adap->owner = THIS_MODULE; /* DDC class but actually often used for more generic I2C */ adap->class = I2C_CLASS_DDC; strlcpy(adap->name, "ST Microelectronics DDC I2C adapter", sizeof(adap->name)); adap->nr = bus_nr; adap->algo = &stu300_algo; adap->dev.parent = &pdev->dev; i2c_set_adapdata(adap, dev); /* i2c device drivers may be active on return from add_adapter() */ ret = i2c_add_numbered_adapter(adap); if (ret) { dev_err(&dev->pdev->dev, "failure adding ST Micro DDC " "I2C adapter\n"); goto err_add_adapter; } return 0; err_add_adapter: err_init_hw: free_irq(dev->irq, dev); err_no_irq: iounmap(dev->virtbase); err_no_ioremap: release_mem_region(dev->phybase, dev->physize); err_no_ioregion: platform_set_drvdata(pdev, NULL); err_no_resource: clk_put(dev->clk); err_no_clk: kfree(dev); err_no_devmem: dev_err(&pdev->dev, "failed to add " NAME " adapter: %d\n", pdev->id); return ret; } #ifdef CONFIG_PM static int stu300_suspend(struct platform_device *pdev, pm_message_t state) { struct stu300_dev *dev = platform_get_drvdata(pdev); /* Turn off everything */ stu300_wr8(0x00, dev->virtbase + I2C_CR); return 0; } static int stu300_resume(struct platform_device *pdev) { int ret = 0; struct stu300_dev *dev = platform_get_drvdata(pdev); clk_enable(dev->clk); ret = stu300_init_hw(dev); clk_disable(dev->clk); if (ret != 0) dev_err(&pdev->dev, "error re-initializing hardware.\n"); return ret; } #else #define stu300_suspend NULL #define stu300_resume NULL #endif static int __exit stu300_remove(struct platform_device *pdev) { struct stu300_dev *dev = platform_get_drvdata(pdev); i2c_del_adapter(&dev->adapter); /* Turn off everything */ stu300_wr8(0x00, dev->virtbase + I2C_CR); free_irq(dev->irq, dev); iounmap(dev->virtbase); release_mem_region(dev->phybase, dev->physize); 
clk_put(dev->clk); platform_set_drvdata(pdev, NULL); kfree(dev); return 0; } static struct platform_driver stu300_i2c_driver = { .driver = { .name = NAME, .owner = THIS_MODULE, }, .remove = __exit_p(stu300_remove), .suspend = stu300_suspend, .resume = stu300_resume, }; static int __init stu300_init(void) { return platform_driver_probe(&stu300_i2c_driver, stu300_probe); } static void __exit stu300_exit(void) { platform_driver_unregister(&stu300_i2c_driver); } /* * The systems using this bus often have very basic devices such * as regulators on the I2C bus, so this needs to be loaded early. * Therefore it is registered in the subsys_initcall(). */ subsys_initcall(stu300_init); module_exit(stu300_exit); MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); MODULE_DESCRIPTION("ST Micro DDC I2C adapter (" NAME ")"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" NAME);
gpl-2.0
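The i2c-stu300.c entry above derives a 12-bit clock divider from the block clock and the requested bus speed, with one formula for Fast Mode (above 100 kHz) and another for Standard Mode, then splits the result across CCR (CC6..CC0) and ECCR (CC11..CC7). A standalone sketch of that calculation follows; stu300_divider() is an illustrative name, not a function in the driver.

#include <stdio.h>

/* Same divider arithmetic the stu300 driver performs before writing CCR/ECCR:
 * one formula for Fast Mode (bus speed above 100 kHz), one for Standard Mode. */
static unsigned long stu300_divider(unsigned long clkrate, unsigned int speed)
{
    if (speed > 100000)                     /* Fast Mode I2C */
        return ((clkrate / speed) - 9) / 3 + 1;
    return ((clkrate / speed) - 7) / 2 + 1; /* Standard Mode I2C */
}

int main(void)
{
    /* 13 MHz block clock, 100 kHz bus: divider 62 -> CCR 0x3e, ECCR 0x00 */
    unsigned long val = stu300_divider(13000000UL, 100000);
    printf("CCR=0x%02lx ECCR=0x%02lx\n", val & 0x7f, (val >> 7) & 0x1f);
    return 0;
}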
Project-Elite/elite_kernelAOSP
arch/xtensa/kernel/asm-offsets.c
7929
4640
/* * arch/xtensa/kernel/asm-offsets.c * * Generates definitions from c-type structures used by assembly sources. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005 Tensilica Inc. * * Chris Zankel <chris@zankel.net> */ #include <asm/processor.h> #include <asm/coprocessor.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/thread_info.h> #include <linux/ptrace.h> #include <linux/mm.h> #include <linux/kbuild.h> #include <asm/ptrace.h> #include <asm/uaccess.h> int main(void) { /* struct pt_regs */ DEFINE(PT_PC, offsetof (struct pt_regs, pc)); DEFINE(PT_PS, offsetof (struct pt_regs, ps)); DEFINE(PT_DEPC, offsetof (struct pt_regs, depc)); DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause)); DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr)); DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause)); DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask)); DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg)); DEFINE(PT_LEND, offsetof (struct pt_regs, lend)); DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount)); DEFINE(PT_SAR, offsetof (struct pt_regs, sar)); DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel)); DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall)); DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0])); DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0])); DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1])); DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2])); DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3])); DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4])); DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5])); DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6])); DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7])); DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8])); DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9])); DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10])); DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11])); DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12])); DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13])); DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14])); DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15])); DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase)); DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart)); DEFINE(PT_SIZE, sizeof(struct pt_regs)); DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS])); DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS])); DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt)); DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t)); /* struct task_struct */ DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace)); DEFINE(TASK_MM, offsetof (struct task_struct, mm)); DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm)); DEFINE(TASK_PID, offsetof (struct task_struct, pid)); DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); /* struct thread_info (offset from start_struct) */ DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra)); DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable)); #if XTENSA_HAVE_COPROCESSORS DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp)); DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp)); 
DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp)); DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp)); DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp)); DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp)); DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp)); DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp)); #endif DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds)); /* struct mm_struct */ DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users)); DEFINE(MM_PGD, offsetof (struct mm_struct, pgd)); DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context)); /* struct page */ DEFINE(PAGE_FLAGS, offsetof(struct page, flags)); /* constants */ DEFINE(_CLONE_VM, CLONE_VM); DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED); DEFINE(PG_ARCH_1, PG_arch_1); return 0; }
gpl-2.0
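The asm-offsets.c entry above exists only to turn offsetof()/sizeof() values into numeric constants that assembly sources can use. A hosted sketch of the same idea is given below, using a made-up pt_regs_demo structure and a printf-based DEFINE() in place of kbuild's asm-emitting macro; neither name belongs to the original file.

#include <stdio.h>
#include <stddef.h>

/* Made-up stand-in for the real struct pt_regs; only here to show the idea. */
struct pt_regs_demo {
    unsigned long pc;
    unsigned long ps;
    unsigned long areg[16];
};

/* kbuild's DEFINE() emits assembler-visible constants; printing them is the
 * hosted equivalent used here for illustration. */
#define DEFINE(sym, val) printf("#define %s %zu\n", #sym, (size_t)(val))

int main(void)
{
    DEFINE(PT_PC,    offsetof(struct pt_regs_demo, pc));
    DEFINE(PT_PS,    offsetof(struct pt_regs_demo, ps));
    DEFINE(PT_AREG0, offsetof(struct pt_regs_demo, areg[0]));
    DEFINE(PT_SIZE,  sizeof(struct pt_regs_demo));
    return 0;
}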
zarboz/Ville-5.0.1
arch/x86/lib/csum-partial_64.c
12281
3531
/* * arch/x86_64/lib/csum-partial.c * * This file contains network checksum routines that are better done * in an architecture-specific manner due to speed. */ #include <linux/compiler.h> #include <linux/module.h> #include <asm/checksum.h> static inline unsigned short from32to16(unsigned a) { unsigned short b = a >> 16; asm("addw %w2,%w0\n\t" "adcw $0,%w0\n" : "=r" (b) : "0" (b), "r" (a)); return b; } /* * Do a 64-bit checksum on an arbitrary memory area. * Returns a 32bit checksum. * * This isn't as time critical as it used to be because many NICs * do hardware checksumming these days. * * Things tried and found to not make it faster: * Manual Prefetching * Unrolling to an 128 bytes inner loop. * Using interleaving with more registers to break the carry chains. */ static unsigned do_csum(const unsigned char *buff, unsigned len) { unsigned odd, count; unsigned long result = 0; if (unlikely(len == 0)) return result; odd = 1 & (unsigned long) buff; if (unlikely(odd)) { result = *buff << 8; len--; buff++; } count = len >> 1; /* nr of 16-bit words.. */ if (count) { if (2 & (unsigned long) buff) { result += *(unsigned short *)buff; count--; len -= 2; buff += 2; } count >>= 1; /* nr of 32-bit words.. */ if (count) { unsigned long zero; unsigned count64; if (4 & (unsigned long) buff) { result += *(unsigned int *) buff; count--; len -= 4; buff += 4; } count >>= 1; /* nr of 64-bit words.. */ /* main loop using 64byte blocks */ zero = 0; count64 = count >> 3; while (count64) { asm("addq 0*8(%[src]),%[res]\n\t" "adcq 1*8(%[src]),%[res]\n\t" "adcq 2*8(%[src]),%[res]\n\t" "adcq 3*8(%[src]),%[res]\n\t" "adcq 4*8(%[src]),%[res]\n\t" "adcq 5*8(%[src]),%[res]\n\t" "adcq 6*8(%[src]),%[res]\n\t" "adcq 7*8(%[src]),%[res]\n\t" "adcq %[zero],%[res]" : [res] "=r" (result) : [src] "r" (buff), [zero] "r" (zero), "[res]" (result)); buff += 64; count64--; } /* last up to 7 8byte blocks */ count %= 8; while (count) { asm("addq %1,%0\n\t" "adcq %2,%0\n" : "=r" (result) : "m" (*(unsigned long *)buff), "r" (zero), "0" (result)); --count; buff += 8; } result = add32_with_carry(result>>32, result&0xffffffff); if (len & 4) { result += *(unsigned int *) buff; buff += 4; } } if (len & 2) { result += *(unsigned short *) buff; buff += 2; } } if (len & 1) result += *buff; result = add32_with_carry(result>>32, result & 0xffffffff); if (unlikely(odd)) { result = from32to16(result); result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); } return result; } /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) * * returns a 32-bit number suitable for feeding into itself * or csum_tcpudp_magic * * this function must be called with even lengths, except * for the last fragment, which may be odd * * it's best to have buff aligned on a 64-bit boundary */ __wsum csum_partial(const void *buff, int len, __wsum sum) { return (__force __wsum)add32_with_carry(do_csum(buff, len), (__force u32)sum); } /* * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ __sum16 ip_compute_csum(const void *buff, int len) { return csum_fold(csum_partial(buff,len,0)); } EXPORT_SYMBOL(ip_compute_csum);
gpl-2.0
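The csum-partial_64.c entry above keeps a wide running sum and repeatedly folds it down with end-around carries (add32_with_carry(), from32to16()) so the ones' complement property survives truncation. A portable sketch of those two folds follows; fold64()/fold32() are illustrative stand-ins, not the kernel helpers, which use addl/adcl and addw/adcw inline assembly.

#include <stdio.h>
#include <stdint.h>

/* Fold a 64-bit partial sum to 32 bits with an end-around carry,
 * roughly what add32_with_carry() does. */
static uint32_t fold64(uint64_t sum)
{
    sum = (sum >> 32) + (sum & 0xffffffffULL);
    sum += sum >> 32;                /* re-add any carry from the line above */
    return (uint32_t)sum;
}

/* Fold 32 bits down to a 16-bit ones' complement sum,
 * roughly what from32to16() does. */
static uint16_t fold32(uint32_t sum)
{
    sum = (sum >> 16) + (sum & 0xffffu);
    sum += sum >> 16;
    return (uint16_t)sum;
}

int main(void)
{
    uint64_t sum = 0x1ffffffffULL;   /* example partial sum that carries out of bit 31 */
    printf("0x%04x\n", fold32(fold64(sum)));   /* prints 0x0001 */
    return 0;
}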
rassillon/android_kernel_samsung_grandneove3g
arch/x86/kernel/crash_dump_64.c
12537
1327
/* * Memory preserving reboot related code. * * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) * Copyright (C) IBM Corporation, 2004. All rights reserved */ #include <linux/errno.h> #include <linux/crash_dump.h> #include <linux/uaccess.h> #include <linux/io.h> /** * copy_oldmem_page - copy one page from "oldmem" * @pfn: page frame number to be copied * @buf: target memory address for the copy; this can be in kernel address * space or user address space (see @userbuf) * @csize: number of bytes to copy * @offset: offset in bytes into the page (based on pfn) to begin the copy * @userbuf: if set, @buf is in user address space, use copy_to_user(), * otherwise @buf is in kernel address space, use memcpy(). * * Copy a page from "oldmem". For this page, there is no pte mapped * in the current kernel. We stitch up a pte, similar to kmap_atomic. */ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf) { void *vaddr; if (!csize) return 0; vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE); if (!vaddr) return -ENOMEM; if (userbuf) { if (copy_to_user(buf, vaddr + offset, csize)) { iounmap(vaddr); return -EFAULT; } } else memcpy(buf, vaddr + offset, csize); set_iounmap_nonlazy(); iounmap(vaddr); return csize; }
gpl-2.0
hampsterblade/android_kernel_samsung_galaxys4g
drivers/net/wireless/wl12xx/wl1251_spi.c
762
7328
/* * This file is part of wl1251 * * Copyright (C) 2008 Nokia Corporation * * Contact: Kalle Valo <kalle.valo@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/irq.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/crc7.h> #include <linux/spi/spi.h> #include <linux/spi/wl12xx.h> #include "wl1251.h" #include "wl1251_reg.h" #include "wl1251_spi.h" static irqreturn_t wl1251_irq(int irq, void *cookie) { struct wl1251 *wl; wl1251_debug(DEBUG_IRQ, "IRQ"); wl = cookie; ieee80211_queue_work(wl->hw, &wl->irq_work); return IRQ_HANDLED; } static struct spi_device *wl_to_spi(struct wl1251 *wl) { return wl->if_priv; } static void wl1251_spi_reset(struct wl1251 *wl) { u8 *cmd; struct spi_transfer t; struct spi_message m; cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); if (!cmd) { wl1251_error("could not allocate cmd for spi reset"); return; } memset(&t, 0, sizeof(t)); spi_message_init(&m); memset(cmd, 0xff, WSPI_INIT_CMD_LEN); t.tx_buf = cmd; t.len = WSPI_INIT_CMD_LEN; spi_message_add_tail(&t, &m); spi_sync(wl_to_spi(wl), &m); wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); } static void wl1251_spi_wake(struct wl1251 *wl) { u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; struct spi_transfer t; struct spi_message m; cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); if (!cmd) { wl1251_error("could not allocate cmd for spi init"); return; } memset(crc, 0, sizeof(crc)); memset(&t, 0, sizeof(t)); spi_message_init(&m); /* * Set WSPI_INIT_COMMAND * the data is being send from the MSB to LSB */ cmd[2] = 0xff; cmd[3] = 0xff; cmd[1] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX; cmd[0] = 0; cmd[7] = 0; cmd[6] |= HW_ACCESS_WSPI_INIT_CMD_MASK << 3; cmd[6] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN; if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0) cmd[5] |= WSPI_INIT_CMD_DIS_FIXEDBUSY; else cmd[5] |= WSPI_INIT_CMD_EN_FIXEDBUSY; cmd[5] |= WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS | WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS; crc[0] = cmd[1]; crc[1] = cmd[0]; crc[2] = cmd[7]; crc[3] = cmd[6]; crc[4] = cmd[5]; cmd[4] |= crc7(0, crc, WSPI_INIT_CMD_CRC_LEN) << 1; cmd[4] |= WSPI_INIT_CMD_END; t.tx_buf = cmd; t.len = WSPI_INIT_CMD_LEN; spi_message_add_tail(&t, &m); spi_sync(wl_to_spi(wl), &m); wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); } static void wl1251_spi_reset_wake(struct wl1251 *wl) { wl1251_spi_reset(wl); wl1251_spi_wake(wl); } static void wl1251_spi_read(struct wl1251 *wl, int addr, void *buf, size_t len) { struct spi_transfer t[3]; struct spi_message m; u8 *busy_buf; u32 *cmd; cmd = &wl->buffer_cmd; busy_buf = wl->buffer_busyword; *cmd = 0; *cmd |= WSPI_CMD_READ; *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; *cmd |= addr & WSPI_CMD_BYTE_ADDR; spi_message_init(&m); memset(t, 0, sizeof(t)); t[0].tx_buf = cmd; t[0].len = 4; spi_message_add_tail(&t[0], &m); /* Busy and non busy words read */ t[1].rx_buf = busy_buf; t[1].len = 
WL1251_BUSY_WORD_LEN; spi_message_add_tail(&t[1], &m); t[2].rx_buf = buf; t[2].len = len; spi_message_add_tail(&t[2], &m); spi_sync(wl_to_spi(wl), &m); /* FIXME: check busy words */ wl1251_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); wl1251_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); } static void wl1251_spi_write(struct wl1251 *wl, int addr, void *buf, size_t len) { struct spi_transfer t[2]; struct spi_message m; u32 *cmd; cmd = &wl->buffer_cmd; *cmd = 0; *cmd |= WSPI_CMD_WRITE; *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH; *cmd |= addr & WSPI_CMD_BYTE_ADDR; spi_message_init(&m); memset(t, 0, sizeof(t)); t[0].tx_buf = cmd; t[0].len = sizeof(*cmd); spi_message_add_tail(&t[0], &m); t[1].tx_buf = buf; t[1].len = len; spi_message_add_tail(&t[1], &m); spi_sync(wl_to_spi(wl), &m); wl1251_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); wl1251_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); } static void wl1251_spi_enable_irq(struct wl1251 *wl) { return enable_irq(wl->irq); } static void wl1251_spi_disable_irq(struct wl1251 *wl) { return disable_irq(wl->irq); } static const struct wl1251_if_operations wl1251_spi_ops = { .read = wl1251_spi_read, .write = wl1251_spi_write, .reset = wl1251_spi_reset_wake, .enable_irq = wl1251_spi_enable_irq, .disable_irq = wl1251_spi_disable_irq, }; static int __devinit wl1251_spi_probe(struct spi_device *spi) { struct wl12xx_platform_data *pdata; struct ieee80211_hw *hw; struct wl1251 *wl; int ret; pdata = spi->dev.platform_data; if (!pdata) { wl1251_error("no platform data"); return -ENODEV; } hw = wl1251_alloc_hw(); if (IS_ERR(hw)) return PTR_ERR(hw); wl = hw->priv; SET_IEEE80211_DEV(hw, &spi->dev); dev_set_drvdata(&spi->dev, wl); wl->if_priv = spi; wl->if_ops = &wl1251_spi_ops; /* This is the only SPI value that we need to set here, the rest * comes from the board-peripherals file */ spi->bits_per_word = 32; ret = spi_setup(spi); if (ret < 0) { wl1251_error("spi_setup failed"); goto out_free; } wl->set_power = pdata->set_power; if (!wl->set_power) { wl1251_error("set power function missing in platform data"); return -ENODEV; } wl->irq = spi->irq; if (wl->irq < 0) { wl1251_error("irq missing in platform data"); return -ENODEV; } wl->use_eeprom = pdata->use_eeprom; ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl); if (ret < 0) { wl1251_error("request_irq() failed: %d", ret); goto out_free; } set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); disable_irq(wl->irq); ret = wl1251_init_ieee80211(wl); if (ret) goto out_irq; return 0; out_irq: free_irq(wl->irq, wl); out_free: ieee80211_free_hw(hw); return ret; } static int __devexit wl1251_spi_remove(struct spi_device *spi) { struct wl1251 *wl = dev_get_drvdata(&spi->dev); free_irq(wl->irq, wl); wl1251_free_hw(wl); return 0; } static struct spi_driver wl1251_spi_driver = { .driver = { .name = DRIVER_NAME, .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = wl1251_spi_probe, .remove = __devexit_p(wl1251_spi_remove), }; static int __init wl1251_spi_init(void) { int ret; ret = spi_register_driver(&wl1251_spi_driver); if (ret < 0) { wl1251_error("failed to register spi driver: %d", ret); goto out; } out: return ret; } static void __exit wl1251_spi_exit(void) { spi_unregister_driver(&wl1251_spi_driver); wl1251_notice("unloaded"); } module_init(wl1251_spi_init); module_exit(wl1251_spi_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
gpl-2.0
Asure/Dropad-kernel-2.6.35.7
net/dccp/ipv4.c
762
28496
/* * net/dccp/ipv4.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/dccp.h> #include <linux/icmp.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/random.h> #include <net/icmp.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> #include <net/inet_sock.h> #include <net/protocol.h> #include <net/sock.h> #include <net/timewait_sock.h> #include <net/tcp_states.h> #include <net/xfrm.h> #include "ackvec.h" #include "ccid.h" #include "dccp.h" #include "feat.h" /* * The per-net dccp.v4_ctl_sk socket is used for responding to * the Out-of-the-blue (OOTB) packets. A control sock will be created * for this socket at the initialization time. */ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct dccp_sock *dp = dccp_sk(sk); const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct rtable *rt; __be32 daddr, nexthop; int tmp; int err; dp->dccps_role = DCCP_ROLE_CLIENT; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; if (inet->opt != NULL && inet->opt->srr) { if (daddr == 0) return -EINVAL; nexthop = inet->opt->faddr; } tmp = ip_route_connect(&rt, nexthop, inet->inet_saddr, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_DCCP, inet->inet_sport, usin->sin_port, sk, 1); if (tmp < 0) return tmp; if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (inet->opt == NULL || !inet->opt->srr) daddr = rt->rt_dst; if (inet->inet_saddr == 0) inet->inet_saddr = rt->rt_src; inet->inet_rcv_saddr = inet->inet_saddr; inet->inet_dport = usin->sin_port; inet->inet_daddr = daddr; inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet->opt != NULL) inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; /* * Socket identity is still unknown (sport may be zero). * However we set state to DCCP_REQUESTING and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. */ dccp_set_state(sk, DCCP_REQUESTING); err = inet_hash_connect(&dccp_death_row, sk); if (err != 0) goto failure; err = ip_route_newports(&rt, IPPROTO_DCCP, inet->inet_sport, inet->inet_dport, sk); if (err != 0) goto failure; /* OK, now commit destination to socket. */ sk_setup_caps(sk, &rt->u.dst); dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, inet->inet_dport); inet->inet_id = dp->dccps_iss ^ jiffies; err = dccp_connect(sk); rt = NULL; if (err != 0) goto failure; out: return err; failure: /* * This unhashes the socket and releases the local port, if necessary. */ dccp_set_state(sk, DCCP_CLOSED); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; goto out; } EXPORT_SYMBOL_GPL(dccp_v4_connect); /* * This routine does path mtu discovery as defined in RFC1191. 
*/ static inline void dccp_do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu) { struct dst_entry *dst; const struct inet_sock *inet = inet_sk(sk); const struct dccp_sock *dp = dccp_sk(sk); /* We are not interested in DCCP_LISTEN and request_socks (RESPONSEs * send out by Linux are always < 576bytes so they should go through * unfragmented). */ if (sk->sk_state == DCCP_LISTEN) return; /* We don't check in the destentry if pmtu discovery is forbidden * on this route. We just assume that no packet_to_big packets * are send back when pmtu discovery is not active. * There is a small race when the user changes this flag in the * route, but I think that's acceptable. */ if ((dst = __sk_dst_check(sk, 0)) == NULL) return; dst->ops->update_pmtu(dst, mtu); /* Something is about to be wrong... Remember soft error * for the case, if this connection will not able to recover. */ if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) sk->sk_err_soft = EMSGSIZE; mtu = dst_mtu(dst); if (inet->pmtudisc != IP_PMTUDISC_DONT && inet_csk(sk)->icsk_pmtu_cookie > mtu) { dccp_sync_mss(sk, mtu); /* * From RFC 4340, sec. 14.1: * * DCCP-Sync packets are the best choice for upward * probing, since DCCP-Sync probes do not risk application * data loss. */ dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC); } /* else let the usual retransmit timer handle it */ } /* * This routine is called by the ICMP module when it gets some sort of error * condition. If err < 0 then the socket should be closed and the error * returned to the user. If err > 0 it's just the icmp type << 8 | icmp code. * After adjustment header points to the first 8 bytes of the tcp header. We * need to find the appropriate port. * * The locking strategy used here is very "optimistic". When someone else * accesses the socket the ICMP is just dropped and for some paths there is no * check at all. A more general error queue to queue errors for later handling * is probably better. */ static void dccp_v4_err(struct sk_buff *skb, u32 info) { const struct iphdr *iph = (struct iphdr *)skb->data; const u8 offset = iph->ihl << 2; const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); struct dccp_sock *dp; struct inet_sock *inet; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; __u64 seq; int err; struct net *net = dev_net(skb->dev); if (skb->len < offset + sizeof(*dh) || skb->len < offset + __dccp_basic_hdr_len(dh)) { ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); return; } sk = inet_lookup(net, &dccp_hashinfo, iph->daddr, dh->dccph_dport, iph->saddr, dh->dccph_sport, inet_iif(skb)); if (sk == NULL) { ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); return; } if (sk->sk_state == DCCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. */ if (sock_owned_by_user(sk)) NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == DCCP_CLOSED) goto out; dp = dccp_sk(sk); seq = dccp_hdr_seq(dh); if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && !between48(seq, dp->dccps_awl, dp->dccps_awh)) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } switch (type) { case ICMP_SOURCE_QUENCH: /* Just silently ignore these. 
*/ goto out; case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ if (!sock_owned_by_user(sk)) dccp_do_pmtu_discovery(sk, iph, info); goto out; } err = icmp_err_convert[code].errno; break; case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; default: goto out; } switch (sk->sk_state) { struct request_sock *req , **prev; case DCCP_LISTEN: if (sock_owned_by_user(sk)) goto out; req = inet_csk_search_req(sk, &prev, dh->dccph_dport, iph->daddr, iph->saddr); if (!req) goto out; /* * ICMPs are not backlogged, hence we cannot get an established * socket here. */ WARN_ON(req->sk); if (seq != dccp_rsk(req)->dreq_iss) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } /* * Still in RESPOND, just remove it silently. * There is no good way to pass the error to the newly * created socket, and POSIX does not want network * errors returned from accept(). */ inet_csk_reqsk_queue_drop(sk, req, prev); goto out; case DCCP_REQUESTING: case DCCP_RESPOND: if (!sock_owned_by_user(sk)) { DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); sk->sk_err = err; sk->sk_error_report(sk); dccp_done(sk); } else sk->sk_err_soft = err; goto out; } /* If we've already connected we will keep trying * until we time out, or the user gives up. * * rfc1122 4.2.3.9 allows to consider as hard errors * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too, * but it is obsoleted by pmtu discovery). * * Note, that in modern internet, where routing is unreliable * and in each dark corner broken firewalls sit, sending random * errors ordered by their masters even this two messages finally lose * their original sense (even Linux sends invalid PORT_UNREACHs) * * Now we are in compliance with RFCs. * --ANK (980905) */ inet = inet_sk(sk); if (!sock_owned_by_user(sk) && inet->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else /* Only an error on timeout */ sk->sk_err_soft = err; out: bh_unlock_sock(sk); sock_put(sk); } static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb, __be32 src, __be32 dst) { return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); } void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb) { const struct inet_sock *inet = inet_sk(sk); struct dccp_hdr *dh = dccp_hdr(skb); dccp_csum_outgoing(skb); dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->inet_saddr, inet->inet_daddr); } EXPORT_SYMBOL_GPL(dccp_v4_send_check); static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb) { return secure_dccp_sequence_number(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, dccp_hdr(skb)->dccph_dport, dccp_hdr(skb)->dccph_sport); } /* * The three way handshake has completed - we got a valid ACK or DATAACK - * now create the new socket. 
* * This is the equivalent of TCP's tcp_v4_syn_recv_sock */ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet_request_sock *ireq; struct inet_sock *newinet; struct sock *newsk; if (sk_acceptq_is_full(sk)) goto exit_overflow; if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL) goto exit; newsk = dccp_create_openreq_child(sk, req, skb); if (newsk == NULL) goto exit; sk_setup_caps(newsk, dst); newinet = inet_sk(newsk); ireq = inet_rsk(req); newinet->inet_daddr = ireq->rmt_addr; newinet->inet_rcv_saddr = ireq->loc_addr; newinet->inet_saddr = ireq->loc_addr; newinet->opt = ireq->opt; ireq->opt = NULL; newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->inet_id = jiffies; dccp_sync_mss(newsk, dst_mtu(dst)); __inet_hash_nolisten(newsk, NULL); __inet_inherit_port(sk, newsk); return newsk; exit_overflow: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); exit: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); dst_release(dst); return NULL; } EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock); static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) { const struct dccp_hdr *dh = dccp_hdr(skb); const struct iphdr *iph = ip_hdr(skb); struct sock *nsk; struct request_sock **prev; /* Find possible connection requests. */ struct request_sock *req = inet_csk_search_req(sk, &prev, dh->dccph_sport, iph->saddr, iph->daddr); if (req != NULL) return dccp_check_req(sk, skb, req, prev); nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo, iph->saddr, dh->dccph_sport, iph->daddr, dh->dccph_dport, inet_iif(skb)); if (nsk != NULL) { if (nsk->sk_state != DCCP_TIME_WAIT) { bh_lock_sock(nsk); return nsk; } inet_twsk_put(inet_twsk(nsk)); return NULL; } return sk; } static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, struct sk_buff *skb) { struct rtable *rt; struct flowi fl = { .oif = skb_rtable(skb)->rt_iif, .nl_u = { .ip4_u = { .daddr = ip_hdr(skb)->saddr, .saddr = ip_hdr(skb)->daddr, .tos = RT_CONN_FLAGS(sk) } }, .proto = sk->sk_protocol, .uli_u = { .ports = { .sport = dccp_hdr(skb)->dccph_dport, .dport = dccp_hdr(skb)->dccph_sport } } }; security_skb_classify_flow(skb, &fl); if (ip_route_output_flow(net, &rt, &fl, sk, 0)) { IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } return &rt->u.dst; } static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, struct request_values *rv_unused) { int err = -1; struct sk_buff *skb; struct dst_entry *dst; dst = inet_csk_route_req(sk, req); if (dst == NULL) goto out; skb = dccp_make_response(sk, dst, req); if (skb != NULL) { const struct inet_request_sock *ireq = inet_rsk(req); struct dccp_hdr *dh = dccp_hdr(skb); dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr, ireq->rmt_addr); err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, ireq->rmt_addr, ireq->opt); err = net_xmit_eval(err); } out: dst_release(dst); return err; } static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { int err; const struct iphdr *rxiph; struct sk_buff *skb; struct dst_entry *dst; struct net *net = dev_net(skb_dst(rxskb)->dev); struct sock *ctl_sk = net->dccp.v4_ctl_sk; /* Never send a reset in response to a reset. 
*/ if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) return; if (skb_rtable(rxskb)->rt_type != RTN_LOCAL) return; dst = dccp_v4_route_skb(net, ctl_sk, rxskb); if (dst == NULL) return; skb = dccp_ctl_make_reset(ctl_sk, rxskb); if (skb == NULL) goto out; rxiph = ip_hdr(rxskb); dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr, rxiph->daddr); skb_dst_set(skb, dst_clone(dst)); bh_lock_sock(ctl_sk); err = ip_build_and_send_pkt(skb, ctl_sk, rxiph->daddr, rxiph->saddr, NULL); bh_unlock_sock(ctl_sk); if (net_xmit_eval(err) == 0) { DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); } out: dst_release(dst); } static void dccp_v4_reqsk_destructor(struct request_sock *req) { dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); kfree(inet_rsk(req)->opt); } static struct request_sock_ops dccp_request_sock_ops __read_mostly = { .family = PF_INET, .obj_size = sizeof(struct dccp_request_sock), .rtx_syn_ack = dccp_v4_send_response, .send_ack = dccp_reqsk_send_ack, .destructor = dccp_v4_reqsk_destructor, .send_reset = dccp_v4_ctl_send_reset, }; int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { struct inet_request_sock *ireq; struct request_sock *req; struct dccp_request_sock *dreq; const __be32 service = dccp_hdr_request(skb)->dccph_req_service; struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); /* Never answer to DCCP_PKT_REQUESTs send to broadcast or multicast */ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) return 0; /* discard, don't send a reset here */ if (dccp_bad_service_code(sk, service)) { dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; goto drop; } /* * TW buckets are converted to open requests without * limitations, they conserve resources and peer is * evidently real one. */ dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; if (inet_csk_reqsk_queue_is_full(sk)) goto drop; /* * Accept backlog is full. If we have already queued enough * of warm entries in syn queue, drop request. It is better than * clogging syn queue with openreqs with exponentially increasing * timeout. */ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; req = inet_reqsk_alloc(&dccp_request_sock_ops); if (req == NULL) goto drop; if (dccp_reqsk_init(req, dccp_sk(sk), skb)) goto drop_and_free; dreq = dccp_rsk(req); if (dccp_parse_options(sk, dreq, skb)) goto drop_and_free; if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; ireq = inet_rsk(req); ireq->loc_addr = ip_hdr(skb)->daddr; ireq->rmt_addr = ip_hdr(skb)->saddr; /* * Step 3: Process LISTEN state * * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie * * In fact we defer setting S.GSR, S.SWL, S.SWH to * dccp_create_openreq_child. */ dreq->dreq_isr = dcb->dccpd_seq; dreq->dreq_iss = dccp_v4_init_sequence(skb); dreq->dreq_service = service; if (dccp_v4_send_response(sk, req, NULL)) goto drop_and_free; inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); return 0; drop_and_free: reqsk_free(req); drop: DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); return -1; } EXPORT_SYMBOL_GPL(dccp_v4_conn_request); int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) { struct dccp_hdr *dh = dccp_hdr(skb); if (sk->sk_state == DCCP_OPEN) { /* Fast path */ if (dccp_rcv_established(sk, skb, dh, skb->len)) goto reset; return 0; } /* * Step 3: Process LISTEN state * If P.type == Request or P contains a valid Init Cookie option, * (* Must scan the packet's options to check for Init * Cookies. 
Only Init Cookies are processed here, * however; other options are processed in Step 8. This * scan need only be performed if the endpoint uses Init * Cookies *) * (* Generate a new socket and switch to that socket *) * Set S := new socket for this port pair * S.state = RESPOND * Choose S.ISS (initial seqno) or set from Init Cookies * Initialize S.GAR := S.ISS * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies * Continue with S.state == RESPOND * (* A Response packet will be generated in Step 11 *) * Otherwise, * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return * * NOTE: the check for the packet types is done in * dccp_rcv_state_process */ if (sk->sk_state == DCCP_LISTEN) { struct sock *nsk = dccp_v4_hnd_req(sk, skb); if (nsk == NULL) goto discard; if (nsk != sk) { if (dccp_child_process(sk, nsk, skb)) goto reset; return 0; } } if (dccp_rcv_state_process(sk, skb, dh, skb->len)) goto reset; return 0; reset: dccp_v4_ctl_send_reset(sk, skb); discard: kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(dccp_v4_do_rcv); /** * dccp_invalid_packet - check for malformed packets * Implements RFC 4340, 8.5: Step 1: Check header basics * Packets that fail these checks are ignored and do not receive Resets. */ int dccp_invalid_packet(struct sk_buff *skb) { const struct dccp_hdr *dh; unsigned int cscov; if (skb->pkt_type != PACKET_HOST) return 1; /* If the packet is shorter than 12 bytes, drop packet and return */ if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) { DCCP_WARN("pskb_may_pull failed\n"); return 1; } dh = dccp_hdr(skb); /* If P.type is not understood, drop packet and return */ if (dh->dccph_type >= DCCP_PKT_INVALID) { DCCP_WARN("invalid packet type\n"); return 1; } /* * If P.Data Offset is too small for packet type, drop packet and return */ if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff); return 1; } /* * If P.Data Offset is too too large for packet, drop packet and return */ if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff); return 1; } /* * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet * has short sequence numbers), drop packet and return */ if ((dh->dccph_type < DCCP_PKT_DATA || dh->dccph_type > DCCP_PKT_DATAACK) && dh->dccph_x == 0) { DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n", dccp_packet_name(dh->dccph_type)); return 1; } /* * If P.CsCov is too large for the packet size, drop packet and return. * This must come _before_ checksumming (not as RFC 4340 suggests). */ cscov = dccp_csum_coverage(skb); if (cscov > skb->len) { DCCP_WARN("P.CsCov %u exceeds packet length %d\n", dh->dccph_cscov, skb->len); return 1; } /* If header checksum is incorrect, drop packet and return. * (This step is completed in the AF-dependent functions.) 
*/ skb->csum = skb_checksum(skb, 0, cscov, 0); return 0; } EXPORT_SYMBOL_GPL(dccp_invalid_packet); /* this is called when real data arrives */ static int dccp_v4_rcv(struct sk_buff *skb) { const struct dccp_hdr *dh; const struct iphdr *iph; struct sock *sk; int min_cov; /* Step 1: Check header basics */ if (dccp_invalid_packet(skb)) goto discard_it; iph = ip_hdr(skb); /* Step 1: If header checksum is incorrect, drop packet and return */ if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) { DCCP_WARN("dropped packet with invalid checksum\n"); goto discard_it; } dh = dccp_hdr(skb); DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh); DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; dccp_pr_debug("%8.8s src=%pI4@%-5d dst=%pI4@%-5d seq=%llu", dccp_packet_name(dh->dccph_type), &iph->saddr, ntohs(dh->dccph_sport), &iph->daddr, ntohs(dh->dccph_dport), (unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq); if (dccp_packet_without_ack(skb)) { DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ; dccp_pr_debug_cat("\n"); } else { DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq); } /* Step 2: * Look up flow ID in table and get corresponding socket */ sk = __inet_lookup_skb(&dccp_hashinfo, skb, dh->dccph_sport, dh->dccph_dport); /* * Step 2: * If no socket ... */ if (sk == NULL) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); goto no_dccp_socket; } /* * Step 2: * ... or S.state == TIMEWAIT, * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return */ if (sk->sk_state == DCCP_TIME_WAIT) { dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); inet_twsk_put(inet_twsk(sk)); goto no_dccp_socket; } /* * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage * o if MinCsCov = 0, only packets with CsCov = 0 are accepted * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov */ min_cov = dccp_sk(sk)->dccps_pcrlen; if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n", dh->dccph_cscov, min_cov); /* FIXME: "Such packets SHOULD be reported using Data Dropped * options (Section 11.7) with Drop Code 0, Protocol * Constraints." */ goto discard_and_relse; } if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; nf_reset(skb); return sk_receive_skb(sk, skb, 1); no_dccp_socket: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; /* * Step 2: * If no socket ... 
* Generate Reset(No Connection) unless P.type == Reset * Drop packet and return */ if (dh->dccph_type != DCCP_PKT_RESET) { DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; dccp_v4_ctl_send_reset(sk, skb); } discard_it: kfree_skb(skb); return 0; discard_and_relse: sock_put(sk); goto discard_it; } static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { .queue_xmit = ip_queue_xmit, .send_check = dccp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .conn_request = dccp_v4_conn_request, .syn_recv_sock = dccp_v4_request_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), .bind_conflict = inet_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, .compat_getsockopt = compat_ip_getsockopt, #endif }; static int dccp_v4_init_sock(struct sock *sk) { static __u8 dccp_v4_ctl_sock_initialized; int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized); if (err == 0) { if (unlikely(!dccp_v4_ctl_sock_initialized)) dccp_v4_ctl_sock_initialized = 1; inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops; } return err; } static struct timewait_sock_ops dccp_timewait_sock_ops = { .twsk_obj_size = sizeof(struct inet_timewait_sock), }; static struct proto dccp_v4_prot = { .name = "DCCP", .owner = THIS_MODULE, .close = dccp_close, .connect = dccp_v4_connect, .disconnect = dccp_disconnect, .ioctl = dccp_ioctl, .init = dccp_v4_init_sock, .setsockopt = dccp_setsockopt, .getsockopt = dccp_getsockopt, .sendmsg = dccp_sendmsg, .recvmsg = dccp_recvmsg, .backlog_rcv = dccp_v4_do_rcv, .hash = inet_hash, .unhash = inet_unhash, .accept = inet_csk_accept, .get_port = inet_csk_get_port, .shutdown = dccp_shutdown, .destroy = dccp_destroy_sock, .orphan_count = &dccp_orphan_count, .max_header = MAX_DCCP_HEADER, .obj_size = sizeof(struct dccp_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .rsk_prot = &dccp_request_sock_ops, .twsk_prot = &dccp_timewait_sock_ops, .h.hashinfo = &dccp_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_dccp_setsockopt, .compat_getsockopt = compat_dccp_getsockopt, #endif }; static const struct net_protocol dccp_v4_protocol = { .handler = dccp_v4_rcv, .err_handler = dccp_v4_err, .no_policy = 1, .netns_ok = 1, }; static const struct proto_ops inet_dccp_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, .bind = inet_bind, .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet_getname, /* FIXME: work on tcp_poll to rename it to inet_csk_poll */ .poll = dccp_poll, .ioctl = inet_ioctl, /* FIXME: work on inet_listen to rename it to sock_common_listen */ .listen = inet_dccp_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static struct inet_protosw dccp_v4_protosw = { .type = SOCK_DCCP, .protocol = IPPROTO_DCCP, .prot = &dccp_v4_prot, .ops = &inet_dccp_ops, .no_check = 0, .flags = INET_PROTOSW_ICSK, }; static int __net_init dccp_v4_init_net(struct net *net) { if (dccp_hashinfo.bhash == NULL) return -ESOCKTNOSUPPORT; return inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET, SOCK_DCCP, IPPROTO_DCCP, net); } static void __net_exit 
dccp_v4_exit_net(struct net *net) { inet_ctl_sock_destroy(net->dccp.v4_ctl_sk); } static struct pernet_operations dccp_v4_ops = { .init = dccp_v4_init_net, .exit = dccp_v4_exit_net, }; static int __init dccp_v4_init(void) { int err = proto_register(&dccp_v4_prot, 1); if (err != 0) goto out; err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP); if (err != 0) goto out_proto_unregister; inet_register_protosw(&dccp_v4_protosw); err = register_pernet_subsys(&dccp_v4_ops); if (err) goto out_destroy_ctl_sock; out: return err; out_destroy_ctl_sock: inet_unregister_protosw(&dccp_v4_protosw); inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); out_proto_unregister: proto_unregister(&dccp_v4_prot); goto out; } static void __exit dccp_v4_exit(void) { unregister_pernet_subsys(&dccp_v4_ops); inet_unregister_protosw(&dccp_v4_protosw); inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); proto_unregister(&dccp_v4_prot); } module_init(dccp_v4_init); module_exit(dccp_v4_exit); /* * __stringify doesn't likes enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33) * values directly, Also cover the case where the protocol is not specified, * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP */ MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
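/*
 * A minimal user-space sketch of the client side that dccp_v4_connect()
 * serves, assuming a kernel with this module loaded.  SOCK_DCCP (6) and
 * IPPROTO_DCCP (33) match the MODULE_ALIAS comment above; SOL_DCCP and
 * DCCP_SOCKOPT_SERVICE are taken from <linux/dccp.h>; the service code,
 * destination address and port are arbitrary illustrative values.
 */
#include <linux/dccp.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP	6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP	33
#endif
#ifndef SOL_DCCP
#define SOL_DCCP	269
#endif

int main(void)
{
	struct sockaddr_in dst;
	uint32_t service = htonl(42);		/* arbitrary service code */
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/*
	 * The service code travels in the DCCP-Request header
	 * (dccph_req_service) and is what dccp_bad_service_code()
	 * checks in dccp_v4_conn_request() on the listening side.
	 */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &service, sizeof(service)) < 0)
		perror("setsockopt(DCCP_SOCKOPT_SERVICE)");

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port   = htons(5001);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	/*
	 * connect() enters dccp_v4_connect(): route lookup, transition to
	 * DCCP_REQUESTING, initial sequence number selection and the
	 * DCCP-Request transmission.
	 */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}

	write(fd, "hello", 5);
	close(fd);
	return 0;
}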
gpl-2.0
virtuous/kernel-7x30-gingerbread-v4
net/bridge/br_sysfs_br.c
762
21222
/* * Sysfs attributes of bridge ports * Linux ethernet bridge * * Authors: * Stephen Hemminger <shemminger@osdl.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/capability.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/if_bridge.h> #include <linux/rtnetlink.h> #include <linux/spinlock.h> #include <linux/times.h> #include "br_private.h" #define to_dev(obj) container_of(obj, struct device, kobj) #define to_bridge(cd) ((struct net_bridge *)netdev_priv(to_net_dev(cd))) /* * Common code for storing bridge parameters. */ static ssize_t store_bridge_parm(struct device *d, const char *buf, size_t len, int (*set)(struct net_bridge *, unsigned long)) { struct net_bridge *br = to_bridge(d); char *endp; unsigned long val; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; spin_lock_bh(&br->lock); err = (*set)(br, val); spin_unlock_bh(&br->lock); return err ? err : len; } static ssize_t show_forward_delay(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); } static int set_forward_delay(struct net_bridge *br, unsigned long val) { unsigned long delay = clock_t_to_jiffies(val); br->forward_delay = delay; if (br_is_root_bridge(br)) br->bridge_forward_delay = delay; return 0; } static ssize_t store_forward_delay(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_forward_delay); } static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR, show_forward_delay, store_forward_delay); static ssize_t show_hello_time(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", jiffies_to_clock_t(to_bridge(d)->hello_time)); } static int set_hello_time(struct net_bridge *br, unsigned long val) { unsigned long t = clock_t_to_jiffies(val); if (t < HZ) return -EINVAL; br->hello_time = t; if (br_is_root_bridge(br)) br->bridge_hello_time = t; return 0; } static ssize_t store_hello_time(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_hello_time); } static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time, store_hello_time); static ssize_t show_max_age(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", jiffies_to_clock_t(to_bridge(d)->max_age)); } static int set_max_age(struct net_bridge *br, unsigned long val) { unsigned long t = clock_t_to_jiffies(val); br->max_age = t; if (br_is_root_bridge(br)) br->bridge_max_age = t; return 0; } static ssize_t store_max_age(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_max_age); } static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age); static ssize_t show_ageing_time(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time)); } static int set_ageing_time(struct net_bridge *br, unsigned long val) { br->ageing_time = clock_t_to_jiffies(val); return 0; } static ssize_t store_ageing_time(struct device *d, struct device_attribute *attr, const 
char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_ageing_time); } static DEVICE_ATTR(ageing_time, S_IRUGO | S_IWUSR, show_ageing_time, store_ageing_time); static ssize_t show_stp_state(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", br->stp_enabled); } static ssize_t store_stp_state(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); char *endp; unsigned long val; if (!capable(CAP_NET_ADMIN)) return -EPERM; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; if (!rtnl_trylock()) return restart_syscall(); br_stp_set_enabled(br, val); rtnl_unlock(); return len; } static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state, store_stp_state); static ssize_t show_priority(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]); } static int set_priority(struct net_bridge *br, unsigned long val) { br_stp_set_bridge_priority(br, (u16) val); return 0; } static ssize_t store_priority(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_priority); } static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, show_priority, store_priority); static ssize_t show_root_id(struct device *d, struct device_attribute *attr, char *buf) { return br_show_bridge_id(buf, &to_bridge(d)->designated_root); } static DEVICE_ATTR(root_id, S_IRUGO, show_root_id, NULL); static ssize_t show_bridge_id(struct device *d, struct device_attribute *attr, char *buf) { return br_show_bridge_id(buf, &to_bridge(d)->bridge_id); } static DEVICE_ATTR(bridge_id, S_IRUGO, show_bridge_id, NULL); static ssize_t show_root_port(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", to_bridge(d)->root_port); } static DEVICE_ATTR(root_port, S_IRUGO, show_root_port, NULL); static ssize_t show_root_path_cost(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", to_bridge(d)->root_path_cost); } static DEVICE_ATTR(root_path_cost, S_IRUGO, show_root_path_cost, NULL); static ssize_t show_topology_change(struct device *d, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", to_bridge(d)->topology_change); } static DEVICE_ATTR(topology_change, S_IRUGO, show_topology_change, NULL); static ssize_t show_topology_change_detected(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", br->topology_change_detected); } static DEVICE_ATTR(topology_change_detected, S_IRUGO, show_topology_change_detected, NULL); static ssize_t show_hello_timer(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer)); } static DEVICE_ATTR(hello_timer, S_IRUGO, show_hello_timer, NULL); static ssize_t show_tcn_timer(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer)); } static DEVICE_ATTR(tcn_timer, S_IRUGO, show_tcn_timer, NULL); static ssize_t show_topology_change_timer(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer)); } 
static DEVICE_ATTR(topology_change_timer, S_IRUGO, show_topology_change_timer, NULL); static ssize_t show_gc_timer(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer)); } static DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL); static ssize_t show_group_addr(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%x:%x:%x:%x:%x:%x\n", br->group_addr[0], br->group_addr[1], br->group_addr[2], br->group_addr[3], br->group_addr[4], br->group_addr[5]); } static ssize_t store_group_addr(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); unsigned new_addr[6]; int i; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (sscanf(buf, "%x:%x:%x:%x:%x:%x", &new_addr[0], &new_addr[1], &new_addr[2], &new_addr[3], &new_addr[4], &new_addr[5]) != 6) return -EINVAL; /* Must be 01:80:c2:00:00:0X */ for (i = 0; i < 5; i++) if (new_addr[i] != br_group_address[i]) return -EINVAL; if (new_addr[5] & ~0xf) return -EINVAL; if (new_addr[5] == 1 || /* 802.3x Pause address */ new_addr[5] == 2 || /* 802.3ad Slow protocols */ new_addr[5] == 3) /* 802.1X PAE address */ return -EINVAL; spin_lock_bh(&br->lock); for (i = 0; i < 6; i++) br->group_addr[i] = new_addr[i]; spin_unlock_bh(&br->lock); return len; } static DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR, show_group_addr, store_group_addr); static ssize_t store_flush(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { struct net_bridge *br = to_bridge(d); if (!capable(CAP_NET_ADMIN)) return -EPERM; br_fdb_flush(br); return len; } static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush); #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static ssize_t show_multicast_router(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", br->multicast_router); } static ssize_t store_multicast_router(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, br_multicast_set_router); } static DEVICE_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router, store_multicast_router); static ssize_t show_multicast_snooping(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%d\n", !br->multicast_disabled); } static ssize_t store_multicast_snooping(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, br_multicast_toggle); } static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR, show_multicast_snooping, store_multicast_snooping); static ssize_t show_hash_elasticity(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->hash_elasticity); } static int set_elasticity(struct net_bridge *br, unsigned long val) { br->hash_elasticity = val; return 0; } static ssize_t store_hash_elasticity(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_elasticity); } static DEVICE_ATTR(hash_elasticity, S_IRUGO | S_IWUSR, show_hash_elasticity, store_hash_elasticity); static ssize_t show_hash_max(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->hash_max); } static ssize_t 
store_hash_max(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, br_multicast_set_hash_max); } static DEVICE_ATTR(hash_max, S_IRUGO | S_IWUSR, show_hash_max, store_hash_max); static ssize_t show_multicast_last_member_count(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->multicast_last_member_count); } static int set_last_member_count(struct net_bridge *br, unsigned long val) { br->multicast_last_member_count = val; return 0; } static ssize_t store_multicast_last_member_count(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_last_member_count); } static DEVICE_ATTR(multicast_last_member_count, S_IRUGO | S_IWUSR, show_multicast_last_member_count, store_multicast_last_member_count); static ssize_t show_multicast_startup_query_count( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%u\n", br->multicast_startup_query_count); } static int set_startup_query_count(struct net_bridge *br, unsigned long val) { br->multicast_startup_query_count = val; return 0; } static ssize_t store_multicast_startup_query_count( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_startup_query_count); } static DEVICE_ATTR(multicast_startup_query_count, S_IRUGO | S_IWUSR, show_multicast_startup_query_count, store_multicast_startup_query_count); static ssize_t show_multicast_last_member_interval( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->multicast_last_member_interval)); } static int set_last_member_interval(struct net_bridge *br, unsigned long val) { br->multicast_last_member_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_last_member_interval( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_last_member_interval); } static DEVICE_ATTR(multicast_last_member_interval, S_IRUGO | S_IWUSR, show_multicast_last_member_interval, store_multicast_last_member_interval); static ssize_t show_multicast_membership_interval( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->multicast_membership_interval)); } static int set_membership_interval(struct net_bridge *br, unsigned long val) { br->multicast_membership_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_membership_interval( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_membership_interval); } static DEVICE_ATTR(multicast_membership_interval, S_IRUGO | S_IWUSR, show_multicast_membership_interval, store_multicast_membership_interval); static ssize_t show_multicast_querier_interval(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->multicast_querier_interval)); } static int set_querier_interval(struct net_bridge *br, unsigned long val) { br->multicast_querier_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_querier_interval(struct device *d, struct device_attribute *attr, const 
char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_querier_interval); } static DEVICE_ATTR(multicast_querier_interval, S_IRUGO | S_IWUSR, show_multicast_querier_interval, store_multicast_querier_interval); static ssize_t show_multicast_query_interval(struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->multicast_query_interval)); } static int set_query_interval(struct net_bridge *br, unsigned long val) { br->multicast_query_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_query_interval(struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_query_interval); } static DEVICE_ATTR(multicast_query_interval, S_IRUGO | S_IWUSR, show_multicast_query_interval, store_multicast_query_interval); static ssize_t show_multicast_query_response_interval( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf( buf, "%lu\n", jiffies_to_clock_t(br->multicast_query_response_interval)); } static int set_query_response_interval(struct net_bridge *br, unsigned long val) { br->multicast_query_response_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_query_response_interval( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_query_response_interval); } static DEVICE_ATTR(multicast_query_response_interval, S_IRUGO | S_IWUSR, show_multicast_query_response_interval, store_multicast_query_response_interval); static ssize_t show_multicast_startup_query_interval( struct device *d, struct device_attribute *attr, char *buf) { struct net_bridge *br = to_bridge(d); return sprintf( buf, "%lu\n", jiffies_to_clock_t(br->multicast_startup_query_interval)); } static int set_startup_query_interval(struct net_bridge *br, unsigned long val) { br->multicast_startup_query_interval = clock_t_to_jiffies(val); return 0; } static ssize_t store_multicast_startup_query_interval( struct device *d, struct device_attribute *attr, const char *buf, size_t len) { return store_bridge_parm(d, buf, len, set_startup_query_interval); } static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR, show_multicast_startup_query_interval, store_multicast_startup_query_interval); #endif static struct attribute *bridge_attrs[] = { &dev_attr_forward_delay.attr, &dev_attr_hello_time.attr, &dev_attr_max_age.attr, &dev_attr_ageing_time.attr, &dev_attr_stp_state.attr, &dev_attr_priority.attr, &dev_attr_bridge_id.attr, &dev_attr_root_id.attr, &dev_attr_root_path_cost.attr, &dev_attr_root_port.attr, &dev_attr_topology_change.attr, &dev_attr_topology_change_detected.attr, &dev_attr_hello_timer.attr, &dev_attr_tcn_timer.attr, &dev_attr_topology_change_timer.attr, &dev_attr_gc_timer.attr, &dev_attr_group_addr.attr, &dev_attr_flush.attr, #ifdef CONFIG_BRIDGE_IGMP_SNOOPING &dev_attr_multicast_router.attr, &dev_attr_multicast_snooping.attr, &dev_attr_hash_elasticity.attr, &dev_attr_hash_max.attr, &dev_attr_multicast_last_member_count.attr, &dev_attr_multicast_startup_query_count.attr, &dev_attr_multicast_last_member_interval.attr, &dev_attr_multicast_membership_interval.attr, &dev_attr_multicast_querier_interval.attr, &dev_attr_multicast_query_interval.attr, &dev_attr_multicast_query_response_interval.attr, &dev_attr_multicast_startup_query_interval.attr, #endif NULL }; static struct 
attribute_group bridge_group = { .name = SYSFS_BRIDGE_ATTR, .attrs = bridge_attrs, }; /* * Export the forwarding information table as a binary file * The records are struct __fdb_entry. * * Returns the number of bytes read. */ static ssize_t brforward_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = to_dev(kobj); struct net_bridge *br = to_bridge(dev); int n; /* must read whole records */ if (off % sizeof(struct __fdb_entry) != 0) return -EINVAL; n = br_fdb_fillbuf(br, buf, count / sizeof(struct __fdb_entry), off / sizeof(struct __fdb_entry)); if (n > 0) n *= sizeof(struct __fdb_entry); return n; } static struct bin_attribute bridge_forward = { .attr = { .name = SYSFS_BRIDGE_FDB, .mode = S_IRUGO, }, .read = brforward_read, }; /* * Add entries in sysfs onto the existing network class device * for the bridge. * Adds a attribute group "bridge" containing tuning parameters. * Binary attribute containing the forward table * Sub directory to hold links to interfaces. * * Note: the ifobj exists only to be a subdirectory * to hold links. The ifobj exists in same data structure * as it's parent the bridge so reference counting works. */ int br_sysfs_addbr(struct net_device *dev) { struct kobject *brobj = &dev->dev.kobj; struct net_bridge *br = netdev_priv(dev); int err; err = sysfs_create_group(brobj, &bridge_group); if (err) { pr_info("%s: can't create group %s/%s\n", __func__, dev->name, bridge_group.name); goto out1; } err = sysfs_create_bin_file(brobj, &bridge_forward); if (err) { pr_info("%s: can't create attribute file %s/%s\n", __func__, dev->name, bridge_forward.attr.name); goto out2; } br->ifobj = kobject_create_and_add(SYSFS_BRIDGE_PORT_SUBDIR, brobj); if (!br->ifobj) { pr_info("%s: can't add kobject (directory) %s/%s\n", __func__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR); goto out3; } return 0; out3: sysfs_remove_bin_file(&dev->dev.kobj, &bridge_forward); out2: sysfs_remove_group(&dev->dev.kobj, &bridge_group); out1: return err; } void br_sysfs_delbr(struct net_device *dev) { struct kobject *kobj = &dev->dev.kobj; struct net_bridge *br = netdev_priv(dev); kobject_put(br->ifobj); sysfs_remove_bin_file(kobj, &bridge_forward); sysfs_remove_group(kobj, &bridge_group); }
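/*
 * User-space view of what br_sysfs_addbr() exposes; a minimal sketch
 * assuming a bridge named br0 and the usual sysfs layout, i.e. that
 * SYSFS_BRIDGE_ATTR and SYSFS_BRIDGE_FDB expand to "bridge" and
 * "brforward" as in <linux/if_bridge.h>.  The values written below are
 * arbitrary examples; times are in USER_HZ clock ticks, as converted by
 * clock_t_to_jiffies()/jiffies_to_clock_t() above.
 */
#include <linux/if_bridge.h>	/* struct __fdb_entry */
#include <stdio.h>

static int write_attr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/br0/bridge/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);			/* handled by store_bridge_parm() */
	fclose(f);
	return 0;
}

int main(void)
{
	char buf[64];
	struct __fdb_entry fdb[64];
	size_t i, n;
	FILE *f;

	write_attr("stp_state", "1");		/* store_stp_state() */
	write_attr("forward_delay", "400");	/* 4 s at USER_HZ == 100 */

	f = fopen("/sys/class/net/br0/bridge/ageing_time", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("ageing_time: %s", buf);	/* show_ageing_time() */
		fclose(f);
	}

	/* brforward_read() only returns whole struct __fdb_entry records */
	f = fopen("/sys/class/net/br0/brforward", "r");
	if (f) {
		n = fread(fdb, sizeof(fdb[0]), 64, f);
		for (i = 0; i < n; i++)
			printf("%02x:%02x:%02x:%02x:%02x:%02x port %u%s\n",
			       fdb[i].mac_addr[0], fdb[i].mac_addr[1],
			       fdb[i].mac_addr[2], fdb[i].mac_addr[3],
			       fdb[i].mac_addr[4], fdb[i].mac_addr[5],
			       (unsigned)fdb[i].port_no,
			       fdb[i].is_local ? " (local)" : "");
		fclose(f);
	}
	return 0;
}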
gpl-2.0
dccp_v4_exit_net(struct net *net) { inet_ctl_sock_destroy(net->dccp.v4_ctl_sk); } static struct pernet_operations dccp_v4_ops = { .init = dccp_v4_init_net, .exit = dccp_v4_exit_net, }; static int __init dccp_v4_init(void) { int err = proto_register(&dccp_v4_prot, 1); if (err != 0) goto out; err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP); if (err != 0) goto out_proto_unregister; inet_register_protosw(&dccp_v4_protosw); err = register_pernet_subsys(&dccp_v4_ops); if (err) goto out_destroy_ctl_sock; out: return err; out_destroy_ctl_sock: inet_unregister_protosw(&dccp_v4_protosw); inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); out_proto_unregister: proto_unregister(&dccp_v4_prot); goto out; } static void __exit dccp_v4_exit(void) { unregister_pernet_subsys(&dccp_v4_ops); inet_unregister_protosw(&dccp_v4_protosw); inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP); proto_unregister(&dccp_v4_prot); } module_init(dccp_v4_init); module_exit(dccp_v4_exit); /* * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33) * values directly. Also cover the case where the protocol is not specified, * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP */ MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
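/* Editorial sketch, not part of the kernel source above: a minimal,
 * standalone userspace illustration of the RFC 4340, sec. 9.2.1 Minimum
 * Checksum Coverage rule that dccp_v4_rcv() enforces through
 * dccps_pcrlen (MinCsCov). The helper name dccp_cscov_acceptable() and
 * the sample values are hypothetical; only the accept/drop logic is
 * taken from the receive path above. */
#include <stdio.h>

static int dccp_cscov_acceptable(unsigned int cscov, unsigned int min_cov)
{
	/* CsCov == 0 means the checksum covers the whole packet: always OK. */
	if (cscov == 0)
		return 1;
	/* Partial coverage is only OK if MinCsCov > 0 and CsCov >= MinCsCov. */
	return min_cov != 0 && cscov >= min_cov;
}

int main(void)
{
	printf("CsCov=0 MinCsCov=0 -> %d\n", dccp_cscov_acceptable(0, 0)); /* accept */
	printf("CsCov=4 MinCsCov=0 -> %d\n", dccp_cscov_acceptable(4, 0)); /* drop   */
	printf("CsCov=4 MinCsCov=3 -> %d\n", dccp_cscov_acceptable(4, 3)); /* accept */
	printf("CsCov=2 MinCsCov=3 -> %d\n", dccp_cscov_acceptable(2, 3)); /* drop   */
	return 0;
}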
gpl-2.0
Entropy512/linux_kernel_galaxyplayer
drivers/staging/cxt1e1/pmc93x6_eeprom.c
762
17227
/* pmc93x6_eeprom.c - PMC's 93LC46 EEPROM Device * * The 93LC46 is a low-power, serial Electrically Erasable and * Programmable Read Only Memory organized as 128 8-bit bytes. * * Accesses to the 93LC46 are done in a bit serial stream, organized * in a 3 wire format. Writes are internally timed by the device * (the In data bit is pulled low until the write is complete and * then is pulled high) and take about 6 milliseconds. * * Copyright (C) 2003-2005 SBE, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include "pmcc4_sysdep.h" #include "sbecom_inline_linux.h" #include "pmcc4.h" #include "sbe_promformat.h" #ifndef TRUE #define TRUE 1 #define FALSE 0 #endif #ifdef SBE_INCLUDE_SYMBOLS #define STATIC #else #define STATIC static #endif /*------------------------------------------------------------------------ * EEPROM address definitions *------------------------------------------------------------------------ * * The offset in the definitions below allows the test to skip over * areas of the EEPROM that other programs (such a VxWorks) are * using. */ #define EE_MFG (long)0 /* Index to manufacturing record */ #define EE_FIRST 0x28 /* Index to start testing at */ #define EE_LIMIT 128 /* Index to end testing at */ /* Bit Ordering for Instructions ** ** A0, A1, A2, A3, A4, A5, A6, OP0, OP1, SB (lsb, or 1st bit out) ** */ #define EPROM_EWEN 0x0019 /* Erase/Write enable (reversed) */ #define EPROM_EWDS 0x0001 /* Erase/Write disable (reversed) */ #define EPROM_READ 0x0003 /* Read (reversed) */ #define EPROM_WRITE 0x0005 /* Write (reversed) */ #define EPROM_ERASE 0x0007 /* Erase (reversed) */ #define EPROM_ERAL 0x0009 /* Erase All (reversed) */ #define EPROM_WRAL 0x0011 /* Write All (reversed) */ #define EPROM_ADR_SZ 7 /* Number of bits in offset address */ #define EPROM_OP_SZ 3 /* Number of bits in command */ #define SIZE_ADDR_OP (EPROM_ADR_SZ + EPROM_OP_SZ) #define LC46A_MAX_OPS 10 /* Number of bits in Instruction */ #define NUM_OF_BITS 8 /* Number of bits in data */ /* EEPROM signal bits */ #define EPROM_ACTIVE_OUT_BIT 0x0001 /* Out data bit */ #define EPROM_ACTIVE_IN_BIT 0x0002 /* In data bit */ #define ACTIVE_IN_BIT_SHIFT 0x0001 /* Shift In data bit to LSB */ #define EPROM_ENCS 0x0004 /* Set EEPROM CS during operation */ /*------------------------------------------------------------------------ * The ByteReverse table is used to reverses the 8 bits within a byte *------------------------------------------------------------------------ */ static unsigned char ByteReverse[256]; static int ByteReverseBuilt = FALSE; /*------------------------------------------------------------------------ * mfg_template - initial serial EEPROM data structure *------------------------------------------------------------------------ */ short mfg_template[sizeof (FLD_TYPE2)] = { PROM_FORMAT_TYPE2, /* type; */ 0x00, 0x1A, /* length[2]; */ 0x00, 0x00, 0x00, 0x00, /* Crc32[4]; */ 0x11, 0x76, /* Id[2]; */ 0x07, 0x05, /* SubId[2] E1; */ 0x00, 0xA0, 0xD6, 0x00, 0x00, 0x00, /* Serial[6]; */ 0x00, 0x00, 0x00, 
0x00, /* CreateTime[4]; */ 0x00, 0x00, 0x00, 0x00, /* HeatRunTime[4]; */ 0x00, 0x00, 0x00, 0x00, /* HeatRunIterations[4]; */ 0x00, 0x00, 0x00, 0x00, /* HeatRunErrors[4]; */ }; /*------------------------------------------------------------------------ * BuildByteReverse - build the 8-bit reverse table *------------------------------------------------------------------------ * * The 'ByteReverse' table reverses the 8 bits within a byte * (the MSB becomes the LSB etc.). */ STATIC void BuildByteReverse (void) { long half; /* Used to build by powers to 2 */ int i; ByteReverse[0] = 0; for (half = 1; half < sizeof (ByteReverse); half <<= 1) for (i = 0; i < half; i++) ByteReverse[half + i] = (char) (ByteReverse[i] | (0x80 / half)); ByteReverseBuilt = TRUE; } /*------------------------------------------------------------------------ * eeprom_delay - small delay for EEPROM timing *------------------------------------------------------------------------ */ STATIC void eeprom_delay (void) { int timeout; for (timeout = 20; timeout; --timeout) { OS_uwait_dummy (); } } /*------------------------------------------------------------------------ * eeprom_put_byte - Send a byte to the EEPROM serially *------------------------------------------------------------------------ * * Given the PCI address and the data, this routine serially sends * the data to the EEPROM. */ void eeprom_put_byte (long addr, long data, int count) { u_int32_t output; while (--count >= 0) { output = (data & EPROM_ACTIVE_OUT_BIT) ? 1 : 0; /* Get next data bit */ output |= EPROM_ENCS; /* Add Chip Select */ data >>= 1; eeprom_delay (); pci_write_32 ((u_int32_t *) addr, output); /* Output it */ } } /*------------------------------------------------------------------------ * eeprom_get_byte - Receive a byte from the EEPROM serially *------------------------------------------------------------------------ * * Given the PCI address, this routine serially fetches the data * from the EEPROM. */ u_int32_t eeprom_get_byte (long addr) { u_int32_t input; u_int32_t data; int count; /* Start the Reading of DATA ** ** The first read is a dummy as the data is latched in the ** EPLD and read on the next read access to the EEPROM. */ input = pci_read_32 ((u_int32_t *) addr); data = 0; count = NUM_OF_BITS; while (--count >= 0) { eeprom_delay (); input = pci_read_32 ((u_int32_t *) addr); data <<= 1; /* Shift data over */ data |= (input & EPROM_ACTIVE_IN_BIT) ? 1 : 0; } return data; } /*------------------------------------------------------------------------ * disable_pmc_eeprom - Disable writes to the EEPROM *------------------------------------------------------------------------ * * Issue the EEPROM command to disable writes. */ STATIC void disable_pmc_eeprom (long addr) { eeprom_put_byte (addr, EPROM_EWDS, SIZE_ADDR_OP); pci_write_32 ((u_int32_t *) addr, 0); /* this removes Chip Select * from EEPROM */ } /*------------------------------------------------------------------------ * enable_pmc_eeprom - Enable writes to the EEPROM *------------------------------------------------------------------------ * * Issue the EEPROM command to enable writes. 
*/ STATIC void enable_pmc_eeprom (long addr) { eeprom_put_byte (addr, EPROM_EWEN, SIZE_ADDR_OP); pci_write_32 ((u_int32_t *) addr, 0); /* this removes Chip Select * from EEPROM */ } /*------------------------------------------------------------------------ * pmc_eeprom_read - EEPROM location read *------------------------------------------------------------------------ * * Given a EEPROM PCI address and location offset, this routine returns * the contents of the specified location to the calling routine. */ u_int32_t pmc_eeprom_read (long addr, long mem_offset) { u_int32_t data; /* Data from chip */ if (!ByteReverseBuilt) BuildByteReverse (); mem_offset = ByteReverse[0x7F & mem_offset]; /* Reverse address */ /* * NOTE: The max offset address is 128 or half the reversal table. So the * LSB is always zero and counts as a built in shift of one bit. So even * though we need to shift 3 bits to make room for the command, we only * need to shift twice more because of the built in shift. */ mem_offset <<= 2; /* Shift for command */ mem_offset |= EPROM_READ; /* Add command */ eeprom_put_byte (addr, mem_offset, SIZE_ADDR_OP); /* Output chip address */ data = eeprom_get_byte (addr); /* Read chip data */ pci_write_32 ((u_int32_t *) addr, 0); /* Remove Chip Select from * EEPROM */ return (data & 0x000000FF); } /*------------------------------------------------------------------------ * pmc_eeprom_write - EEPROM location write *------------------------------------------------------------------------ * * Given a EEPROM PCI address, location offset and value, this * routine writes the value to the specified location. * * Note: it is up to the caller to determine if the write * operation succeeded. */ int pmc_eeprom_write (long addr, long mem_offset, u_int32_t data) { volatile u_int32_t temp; int count; if (!ByteReverseBuilt) BuildByteReverse (); mem_offset = ByteReverse[0x7F & mem_offset]; /* Reverse address */ /* * NOTE: The max offset address is 128 or half the reversal table. So the * LSB is always zero and counts as a built in shift of one bit. So even * though we need to shift 3 bits to make room for the command, we only * need to shift twice more because of the built in shift. */ mem_offset <<= 2; /* Shift for command */ mem_offset |= EPROM_WRITE; /* Add command */ eeprom_put_byte (addr, mem_offset, SIZE_ADDR_OP); /* Output chip address */ data = ByteReverse[0xFF & data];/* Reverse data */ eeprom_put_byte (addr, data, NUM_OF_BITS); /* Output chip data */ pci_write_32 ((u_int32_t *) addr, 0); /* Remove Chip Select from * EEPROM */ /* ** Must see Data In at a low state before completing this transaction. ** ** Afterwards, the data bit will return to a high state, ~6 ms, terminating ** the operation. 
*/ pci_write_32 ((u_int32_t *) addr, EPROM_ENCS); /* Re-enable Chip Select */ temp = pci_read_32 ((u_int32_t *) addr); /* discard first read */ temp = pci_read_32 ((u_int32_t *) addr); if (temp & EPROM_ACTIVE_IN_BIT) { temp = pci_read_32 ((u_int32_t *) addr); if (temp & EPROM_ACTIVE_IN_BIT) { pci_write_32 ((u_int32_t *) addr, 0); /* Remove Chip Select * from EEPROM */ return (1); } } count = 1000; while (count--) { for (temp = 0; temp < 0x10; temp++) OS_uwait_dummy (); if (pci_read_32 ((u_int32_t *) addr) & EPROM_ACTIVE_IN_BIT) break; } if (count == -1) return (2); return (0); } /*------------------------------------------------------------------------ * pmcGetBuffValue - read the specified value from buffer *------------------------------------------------------------------------ */ long pmcGetBuffValue (char *ptr, int size) { long value = 0; int index; for (index = 0; index < size; ++index) { value <<= 8; value |= ptr[index] & 0xFF; } return value; } /*------------------------------------------------------------------------ * pmcSetBuffValue - save the specified value to buffer *------------------------------------------------------------------------ */ void pmcSetBuffValue (char *ptr, long value, int size) { int index = size; while (--index >= 0) { ptr[index] = (char) (value & 0xFF); value >>= 8; } } /*------------------------------------------------------------------------ * pmc_eeprom_read_buffer - read EEPROM data into specified buffer *------------------------------------------------------------------------ */ void pmc_eeprom_read_buffer (long addr, long mem_offset, char *dest_ptr, int size) { while (--size >= 0) *dest_ptr++ = (char) pmc_eeprom_read (addr, mem_offset++); } /*------------------------------------------------------------------------ * pmc_eeprom_write_buffer - write EEPROM data from specified buffer *------------------------------------------------------------------------ */ void pmc_eeprom_write_buffer (long addr, long mem_offset, char *dest_ptr, int size) { enable_pmc_eeprom (addr); while (--size >= 0) pmc_eeprom_write (addr, mem_offset++, *dest_ptr++); disable_pmc_eeprom (addr); } /*------------------------------------------------------------------------ * pmcCalcCrc - calculate the CRC for the serial EEPROM structure *------------------------------------------------------------------------ */ u_int32_t pmcCalcCrc_T01 (void *bufp) { FLD_TYPE2 *buf = bufp; u_int32_t crc; /* CRC of the structure */ /* Calc CRC for type and length fields */ sbeCrc ( (u_int8_t *) &buf->type, (u_int32_t) STRUCT_OFFSET (FLD_TYPE1, Crc32), (u_int32_t) 0, (u_int32_t *) &crc); #ifdef EEPROM_TYPE_DEBUG pr_info("sbeCrc: crc 1 calculated as %08x\n", crc); /* RLD DEBUG */ #endif return ~crc; } u_int32_t pmcCalcCrc_T02 (void *bufp) { FLD_TYPE2 *buf = bufp; u_int32_t crc; /* CRC of the structure */ /* Calc CRC for type and length fields */ sbeCrc ( (u_int8_t *) &buf->type, (u_int32_t) STRUCT_OFFSET (FLD_TYPE2, Crc32), (u_int32_t) 0, (u_int32_t *) &crc); /* Calc CRC for remaining fields */ sbeCrc ( (u_int8_t *) &buf->Id[0], (u_int32_t) (sizeof (FLD_TYPE2) - STRUCT_OFFSET (FLD_TYPE2, Id)), (u_int32_t) crc, (u_int32_t *) &crc); #ifdef EEPROM_TYPE_DEBUG pr_info("sbeCrc: crc 2 calculated as %08x\n", crc); /* RLD DEBUG */ #endif return crc; } /*------------------------------------------------------------------------ * pmc_init_seeprom - initialize the serial EEPROM structure *------------------------------------------------------------------------ * * At the front of the serial EEPROM there is a record 
that contains * manufacturing information. If the info does not already exist, it * is created. The only field modifiable by the operator is the * serial number field. */ void pmc_init_seeprom (u_int32_t addr, u_int32_t serialNum) { PROMFORMAT buffer; /* Memory image of structure */ u_int32_t crc; /* CRC of structure */ time_t createTime; int i; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) createTime = CURRENT_TIME; #else createTime = get_seconds (); #endif /* use template data */ for (i = 0; i < sizeof (FLD_TYPE2); ++i) buffer.bytes[i] = mfg_template[i]; /* Update serial number field in buffer */ pmcSetBuffValue (&buffer.fldType2.Serial[3], serialNum, 3); /* Update create time field in buffer */ pmcSetBuffValue (&buffer.fldType2.CreateTime[0], createTime, 4); /* Update CRC field in buffer */ crc = pmcCalcCrc_T02 (&buffer); pmcSetBuffValue (&buffer.fldType2.Crc32[0], crc, 4); #ifdef DEBUG for (i = 0; i < sizeof (FLD_TYPE2); ++i) pr_info("[%02X] = %02X\n", i, buffer.bytes[i] & 0xFF); #endif /* Write structure to serial EEPROM */ pmc_eeprom_write_buffer (addr, EE_MFG, (char *) &buffer, sizeof (FLD_TYPE2)); } char pmc_verify_cksum (void *bufp) { FLD_TYPE1 *buf1 = bufp; FLD_TYPE2 *buf2 = bufp; u_int32_t crc1, crc2; /* CRC read from EEPROM */ /* Retrieve contents of CRC field */ crc1 = pmcGetBuffValue (&buf1->Crc32[0], sizeof (buf1->Crc32)); #ifdef EEPROM_TYPE_DEBUG pr_info("EEPROM: chksum 1 reads as %08x\n", crc1); /* RLD DEBUG */ #endif if ((buf1->type == PROM_FORMAT_TYPE1) && (pmcCalcCrc_T01 ((void *) buf1) == crc1)) return PROM_FORMAT_TYPE1; /* checksum type 1 verified */ crc2 = pmcGetBuffValue (&buf2->Crc32[0], sizeof (buf2->Crc32)); #ifdef EEPROM_TYPE_DEBUG pr_info("EEPROM: chksum 2 reads as %08x\n", crc2); /* RLD DEBUG */ #endif if ((buf2->type == PROM_FORMAT_TYPE2) && (pmcCalcCrc_T02 ((void *) buf2) == crc2)) return PROM_FORMAT_TYPE2; /* checksum type 2 verified */ return PROM_FORMAT_Unk; /* failed to validate */ } /*** End-of-File ***/
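/* Editorial sketch, not part of the driver source above: a standalone
 * userspace restatement of the ByteReverse table construction documented
 * in BuildByteReverse() (the MSB becomes the LSB and so on), cross-checked
 * against a plain bit-by-bit reversal. Function names here are hypothetical. */
#include <stdio.h>

static unsigned char byte_reverse[256];

/* Same power-of-two construction as BuildByteReverse() in the driver. */
static void build_byte_reverse(void)
{
	int half, i;

	byte_reverse[0] = 0;
	for (half = 1; half < 256; half <<= 1)
		for (i = 0; i < half; i++)
			byte_reverse[half + i] =
				(unsigned char)(byte_reverse[i] | (0x80 / half));
}

/* Naive reference: reverse the 8 bits of one byte. */
static unsigned char reverse_bits(unsigned char v)
{
	unsigned char r = 0;
	int bit;

	for (bit = 0; bit < 8; bit++)
		if (v & (1 << bit))
			r |= (unsigned char)(0x80 >> bit);
	return r;
}

int main(void)
{
	int v, mismatches = 0;

	build_byte_reverse();
	for (v = 0; v < 256; v++)
		if (byte_reverse[v] != reverse_bits((unsigned char)v))
			mismatches++;
	/* 0x2A (00101010) reverses to 0x54 (01010100); mismatches should be 0. */
	printf("reverse(0x2A) = 0x%02X, mismatches = %d\n",
	       byte_reverse[0x2A], mismatches);
	return 0;
}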
gpl-2.0
kingklick/kk-nexus-kernel
net/atm/br2684.c
762
21119
/* * Ethernet netdevice using ATM AAL5 as underlying carrier * (RFC1483 obsoleted by RFC2684) for Linux * * Authors: Marcell GAL, 2000, XDSL Ltd, Hungary * Eric Kinzie, 2006-2007, US Naval Research Laboratory */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/ip.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <net/arp.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/capability.h> #include <linux/seq_file.h> #include <linux/atmbr2684.h> #include "common.h" static void skb_debug(const struct sk_buff *skb) { #ifdef SKB_DEBUG #define NUM2PRINT 50 print_hex_dump(KERN_DEBUG, "br2684: skb: ", DUMP_OFFSET, 16, 1, skb->data, min(NUM2PRINT, skb->len), true); #endif } #define BR2684_ETHERTYPE_LEN 2 #define BR2684_PAD_LEN 2 #define LLC 0xaa, 0xaa, 0x03 #define SNAP_BRIDGED 0x00, 0x80, 0xc2 #define SNAP_ROUTED 0x00, 0x00, 0x00 #define PID_ETHERNET 0x00, 0x07 #define ETHERTYPE_IPV4 0x08, 0x00 #define ETHERTYPE_IPV6 0x86, 0xdd #define PAD_BRIDGED 0x00, 0x00 static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 }; static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 }; static const unsigned char llc_oui_pid_pad[] = { LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED }; static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 }; static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 }; enum br2684_encaps { e_vc = BR2684_ENCAPS_VC, e_llc = BR2684_ENCAPS_LLC, }; struct br2684_vcc { struct atm_vcc *atmvcc; struct net_device *device; /* keep old push, pop functions for chaining */ void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb); void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); enum br2684_encaps encaps; struct list_head brvccs; #ifdef CONFIG_ATM_BR2684_IPFILTER struct br2684_filter filter; #endif /* CONFIG_ATM_BR2684_IPFILTER */ unsigned copies_needed, copies_failed; }; struct br2684_dev { struct net_device *net_dev; struct list_head br2684_devs; int number; struct list_head brvccs; /* one device <=> one vcc (before xmas) */ int mac_was_set; enum br2684_payload payload; }; /* * This lock should be held for writing any time the list of devices or * their attached vcc's could be altered. It should be held for reading * any time these are being queried. 
Note that we sometimes need to * do read-locking under interrupt context, so write locking must block * the current CPU's interrupts */ static DEFINE_RWLOCK(devs_lock); static LIST_HEAD(br2684_devs); static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev) { return (struct br2684_dev *)netdev_priv(net_dev); } static inline struct net_device *list_entry_brdev(const struct list_head *le) { return list_entry(le, struct br2684_dev, br2684_devs)->net_dev; } static inline struct br2684_vcc *BR2684_VCC(const struct atm_vcc *atmvcc) { return (struct br2684_vcc *)(atmvcc->user_back); } static inline struct br2684_vcc *list_entry_brvcc(const struct list_head *le) { return list_entry(le, struct br2684_vcc, brvccs); } /* Caller should hold read_lock(&devs_lock) */ static struct net_device *br2684_find_dev(const struct br2684_if_spec *s) { struct list_head *lh; struct net_device *net_dev; switch (s->method) { case BR2684_FIND_BYNUM: list_for_each(lh, &br2684_devs) { net_dev = list_entry_brdev(lh); if (BRPRIV(net_dev)->number == s->spec.devnum) return net_dev; } break; case BR2684_FIND_BYIFNAME: list_for_each(lh, &br2684_devs) { net_dev = list_entry_brdev(lh); if (!strncmp(net_dev->name, s->spec.ifname, IFNAMSIZ)) return net_dev; } break; } return NULL; } /* chained vcc->pop function. Check if we should wake the netif_queue */ static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb) { struct br2684_vcc *brvcc = BR2684_VCC(vcc); struct net_device *net_dev = skb->dev; pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev); brvcc->old_pop(vcc, skb); if (!net_dev) return; if (atm_may_send(vcc, 0)) netif_wake_queue(net_dev); } /* * Send a packet out a particular vcc. Not to useful right now, but paves * the way for multiple vcc's per itf. Returns true if we can send, * otherwise false */ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev, struct br2684_vcc *brvcc) { struct br2684_dev *brdev = BRPRIV(dev); struct atm_vcc *atmvcc; int minheadroom = (brvcc->encaps == e_llc) ? 
10 : 2; if (skb_headroom(skb) < minheadroom) { struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom); brvcc->copies_needed++; dev_kfree_skb(skb); if (skb2 == NULL) { brvcc->copies_failed++; return 0; } skb = skb2; } if (brvcc->encaps == e_llc) { if (brdev->payload == p_bridged) { skb_push(skb, sizeof(llc_oui_pid_pad)); skb_copy_to_linear_data(skb, llc_oui_pid_pad, sizeof(llc_oui_pid_pad)); } else if (brdev->payload == p_routed) { unsigned short prot = ntohs(skb->protocol); skb_push(skb, sizeof(llc_oui_ipv4)); switch (prot) { case ETH_P_IP: skb_copy_to_linear_data(skb, llc_oui_ipv4, sizeof(llc_oui_ipv4)); break; case ETH_P_IPV6: skb_copy_to_linear_data(skb, llc_oui_ipv6, sizeof(llc_oui_ipv6)); break; default: dev_kfree_skb(skb); return 0; } } } else { /* e_vc */ if (brdev->payload == p_bridged) { skb_push(skb, 2); memset(skb->data, 0, 2); } else { /* p_routed */ skb_pull(skb, ETH_HLEN); } } skb_debug(skb); ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc; pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev); atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc); ATM_SKB(skb)->atm_options = atmvcc->atm_options; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; atmvcc->send(atmvcc, skb); if (!atm_may_send(atmvcc, 0)) { netif_stop_queue(brvcc->device); /*check for race with br2684_pop*/ if (atm_may_send(atmvcc, 0)) netif_start_queue(brvcc->device); } return 1; } static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb, const struct br2684_dev *brdev) { return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */ } static netdev_tx_t br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct br2684_dev *brdev = BRPRIV(dev); struct br2684_vcc *brvcc; pr_debug("skb_dst(skb)=%p\n", skb_dst(skb)); read_lock(&devs_lock); brvcc = pick_outgoing_vcc(skb, brdev); if (brvcc == NULL) { pr_debug("no vcc attached to dev %s\n", dev->name); dev->stats.tx_errors++; dev->stats.tx_carrier_errors++; /* netif_stop_queue(dev); */ dev_kfree_skb(skb); read_unlock(&devs_lock); return NETDEV_TX_OK; } if (!br2684_xmit_vcc(skb, dev, brvcc)) { /* * We should probably use netif_*_queue() here, but that * involves added complication. We need to walk before * we can run. * * Don't free here! this pointer might be no longer valid! */ dev->stats.tx_errors++; dev->stats.tx_fifo_errors++; } read_unlock(&devs_lock); return NETDEV_TX_OK; } /* * We remember when the MAC gets set, so we don't override it later with * the ESI of the ATM card of the first VC */ static int br2684_mac_addr(struct net_device *dev, void *p) { int err = eth_mac_addr(dev, p); if (!err) BRPRIV(dev)->mac_was_set = 1; return err; } #ifdef CONFIG_ATM_BR2684_IPFILTER /* this IOCTL is experimental. */ static int br2684_setfilt(struct atm_vcc *atmvcc, void __user * arg) { struct br2684_vcc *brvcc; struct br2684_filter_set fs; if (copy_from_user(&fs, arg, sizeof fs)) return -EFAULT; if (fs.ifspec.method != BR2684_FIND_BYNOTHING) { /* * This is really a per-vcc thing, but we can also search * by device. 
*/ struct br2684_dev *brdev; read_lock(&devs_lock); brdev = BRPRIV(br2684_find_dev(&fs.ifspec)); if (brdev == NULL || list_empty(&brdev->brvccs) || brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */ brvcc = NULL; else brvcc = list_entry_brvcc(brdev->brvccs.next); read_unlock(&devs_lock); if (brvcc == NULL) return -ESRCH; } else brvcc = BR2684_VCC(atmvcc); memcpy(&brvcc->filter, &fs.filter, sizeof(brvcc->filter)); return 0; } /* Returns 1 if packet should be dropped */ static inline int packet_fails_filter(__be16 type, struct br2684_vcc *brvcc, struct sk_buff *skb) { if (brvcc->filter.netmask == 0) return 0; /* no filter in place */ if (type == htons(ETH_P_IP) && (((struct iphdr *)(skb->data))->daddr & brvcc->filter. netmask) == brvcc->filter.prefix) return 0; if (type == htons(ETH_P_ARP)) return 0; /* * TODO: we should probably filter ARPs too.. don't want to have * them returning values that don't make sense, or is that ok? */ return 1; /* drop */ } #endif /* CONFIG_ATM_BR2684_IPFILTER */ static void br2684_close_vcc(struct br2684_vcc *brvcc) { pr_debug("removing VCC %p from dev %p\n", brvcc, brvcc->device); write_lock_irq(&devs_lock); list_del(&brvcc->brvccs); write_unlock_irq(&devs_lock); brvcc->atmvcc->user_back = NULL; /* what about vcc->recvq ??? */ brvcc->old_push(brvcc->atmvcc, NULL); /* pass on the bad news */ kfree(brvcc); module_put(THIS_MODULE); } /* when AAL5 PDU comes in: */ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) { struct br2684_vcc *brvcc = BR2684_VCC(atmvcc); struct net_device *net_dev = brvcc->device; struct br2684_dev *brdev = BRPRIV(net_dev); pr_debug("\n"); if (unlikely(skb == NULL)) { /* skb==NULL means VCC is being destroyed */ br2684_close_vcc(brvcc); if (list_empty(&brdev->brvccs)) { write_lock_irq(&devs_lock); list_del(&brdev->br2684_devs); write_unlock_irq(&devs_lock); unregister_netdev(net_dev); free_netdev(net_dev); } return; } skb_debug(skb); atm_return(atmvcc, skb->truesize); pr_debug("skb from brdev %p\n", brdev); if (brvcc->encaps == e_llc) { if (skb->len > 7 && skb->data[7] == 0x01) __skb_trim(skb, skb->len - 4); /* accept packets that have "ipv[46]" in the snap header */ if ((skb->len >= (sizeof(llc_oui_ipv4))) && (memcmp(skb->data, llc_oui_ipv4, sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) { if (memcmp(skb->data + 6, ethertype_ipv6, sizeof(ethertype_ipv6)) == 0) skb->protocol = htons(ETH_P_IPV6); else if (memcmp(skb->data + 6, ethertype_ipv4, sizeof(ethertype_ipv4)) == 0) skb->protocol = htons(ETH_P_IP); else goto error; skb_pull(skb, sizeof(llc_oui_ipv4)); skb_reset_network_header(skb); skb->pkt_type = PACKET_HOST; /* * Let us waste some time for checking the encapsulation. * Note, that only 7 char is checked so frames with a valid FCS * are also accepted (but FCS is not checked of course). 
*/ } else if ((skb->len >= sizeof(llc_oui_pid_pad)) && (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) { skb_pull(skb, sizeof(llc_oui_pid_pad)); skb->protocol = eth_type_trans(skb, net_dev); } else goto error; } else { /* e_vc */ if (brdev->payload == p_routed) { struct iphdr *iph; skb_reset_network_header(skb); iph = ip_hdr(skb); if (iph->version == 4) skb->protocol = htons(ETH_P_IP); else if (iph->version == 6) skb->protocol = htons(ETH_P_IPV6); else goto error; skb->pkt_type = PACKET_HOST; } else { /* p_bridged */ /* first 2 chars should be 0 */ if (*((u16 *) (skb->data)) != 0) goto error; skb_pull(skb, BR2684_PAD_LEN); skb->protocol = eth_type_trans(skb, net_dev); } } #ifdef CONFIG_ATM_BR2684_IPFILTER if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb))) goto dropped; #endif /* CONFIG_ATM_BR2684_IPFILTER */ skb->dev = net_dev; ATM_SKB(skb)->vcc = atmvcc; /* needed ? */ pr_debug("received packet's protocol: %x\n", ntohs(skb->protocol)); skb_debug(skb); /* sigh, interface is down? */ if (unlikely(!(net_dev->flags & IFF_UP))) goto dropped; net_dev->stats.rx_packets++; net_dev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(skb); return; dropped: net_dev->stats.rx_dropped++; goto free_skb; error: net_dev->stats.rx_errors++; free_skb: dev_kfree_skb(skb); } /* * Assign a vcc to a dev * Note: we do not have explicit unassign, but look at _push() */ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) { struct sk_buff_head queue; int err; struct br2684_vcc *brvcc; struct sk_buff *skb, *tmp; struct sk_buff_head *rq; struct br2684_dev *brdev; struct net_device *net_dev; struct atm_backend_br2684 be; unsigned long flags; if (copy_from_user(&be, arg, sizeof be)) return -EFAULT; brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL); if (!brvcc) return -ENOMEM; write_lock_irq(&devs_lock); net_dev = br2684_find_dev(&be.ifspec); if (net_dev == NULL) { pr_err("tried to attach to non-existant device\n"); err = -ENXIO; goto error; } brdev = BRPRIV(net_dev); if (atmvcc->push == NULL) { err = -EBADFD; goto error; } if (!list_empty(&brdev->brvccs)) { /* Only 1 VCC/dev right now */ err = -EEXIST; goto error; } if (be.fcs_in != BR2684_FCSIN_NO || be.fcs_out != BR2684_FCSOUT_NO || be.fcs_auto || be.has_vpiid || be.send_padding || (be.encaps != BR2684_ENCAPS_VC && be.encaps != BR2684_ENCAPS_LLC) || be.min_size != 0) { err = -EINVAL; goto error; } pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc); if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) { unsigned char *esi = atmvcc->dev->esi; if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5]) memcpy(net_dev->dev_addr, esi, net_dev->addr_len); else net_dev->dev_addr[2] = 1; } list_add(&brvcc->brvccs, &brdev->brvccs); write_unlock_irq(&devs_lock); brvcc->device = net_dev; brvcc->atmvcc = atmvcc; atmvcc->user_back = brvcc; brvcc->encaps = (enum br2684_encaps)be.encaps; brvcc->old_push = atmvcc->push; brvcc->old_pop = atmvcc->pop; barrier(); atmvcc->push = br2684_push; atmvcc->pop = br2684_pop; __skb_queue_head_init(&queue); rq = &sk_atm(atmvcc)->sk_receive_queue; spin_lock_irqsave(&rq->lock, flags); skb_queue_splice_init(rq, &queue); spin_unlock_irqrestore(&rq->lock, flags); skb_queue_walk_safe(&queue, skb, tmp) { struct net_device *dev = skb->dev; dev->stats.rx_bytes -= skb->len; dev->stats.rx_packets--; br2684_push(atmvcc, skb); } __module_get(THIS_MODULE); return 0; error: write_unlock_irq(&devs_lock); kfree(brvcc); return err; } static const struct net_device_ops 
br2684_netdev_ops = { .ndo_start_xmit = br2684_start_xmit, .ndo_set_mac_address = br2684_mac_addr, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; static const struct net_device_ops br2684_netdev_ops_routed = { .ndo_start_xmit = br2684_start_xmit, .ndo_set_mac_address = br2684_mac_addr, .ndo_change_mtu = eth_change_mtu }; static void br2684_setup(struct net_device *netdev) { struct br2684_dev *brdev = BRPRIV(netdev); ether_setup(netdev); brdev->net_dev = netdev; netdev->netdev_ops = &br2684_netdev_ops; INIT_LIST_HEAD(&brdev->brvccs); } static void br2684_setup_routed(struct net_device *netdev) { struct br2684_dev *brdev = BRPRIV(netdev); brdev->net_dev = netdev; netdev->hard_header_len = 0; netdev->netdev_ops = &br2684_netdev_ops_routed; netdev->addr_len = 0; netdev->mtu = 1500; netdev->type = ARPHRD_PPP; netdev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; netdev->tx_queue_len = 100; INIT_LIST_HEAD(&brdev->brvccs); } static int br2684_create(void __user *arg) { int err; struct net_device *netdev; struct br2684_dev *brdev; struct atm_newif_br2684 ni; enum br2684_payload payload; pr_debug("\n"); if (copy_from_user(&ni, arg, sizeof ni)) return -EFAULT; if (ni.media & BR2684_FLAG_ROUTED) payload = p_routed; else payload = p_bridged; ni.media &= 0xffff; /* strip flags */ if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500) return -EINVAL; netdev = alloc_netdev(sizeof(struct br2684_dev), ni.ifname[0] ? ni.ifname : "nas%d", (payload == p_routed) ? br2684_setup_routed : br2684_setup); if (!netdev) return -ENOMEM; brdev = BRPRIV(netdev); pr_debug("registered netdev %s\n", netdev->name); /* open, stop, do_ioctl ? */ err = register_netdev(netdev); if (err < 0) { pr_err("register_netdev failed\n"); free_netdev(netdev); return err; } write_lock_irq(&devs_lock); brdev->payload = payload; brdev->number = list_empty(&br2684_devs) ? 
1 : BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; list_add_tail(&brdev->br2684_devs, &br2684_devs); write_unlock_irq(&devs_lock); return 0; } /* * This handles ioctls actually performed on our vcc - we must return * -ENOIOCTLCMD for any unrecognized ioctl */ static int br2684_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct atm_vcc *atmvcc = ATM_SD(sock); void __user *argp = (void __user *)arg; atm_backend_t b; int err; switch (cmd) { case ATM_SETBACKEND: case ATM_NEWBACKENDIF: err = get_user(b, (atm_backend_t __user *) argp); if (err) return -EFAULT; if (b != ATM_BACKEND_BR2684) return -ENOIOCTLCMD; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (cmd == ATM_SETBACKEND) return br2684_regvcc(atmvcc, argp); else return br2684_create(argp); #ifdef CONFIG_ATM_BR2684_IPFILTER case BR2684_SETFILT: if (atmvcc->push != br2684_push) return -ENOIOCTLCMD; if (!capable(CAP_NET_ADMIN)) return -EPERM; err = br2684_setfilt(atmvcc, argp); return err; #endif /* CONFIG_ATM_BR2684_IPFILTER */ } return -ENOIOCTLCMD; } static struct atm_ioctl br2684_ioctl_ops = { .owner = THIS_MODULE, .ioctl = br2684_ioctl, }; #ifdef CONFIG_PROC_FS static void *br2684_seq_start(struct seq_file *seq, loff_t * pos) __acquires(devs_lock) { read_lock(&devs_lock); return seq_list_start(&br2684_devs, *pos); } static void *br2684_seq_next(struct seq_file *seq, void *v, loff_t * pos) { return seq_list_next(v, &br2684_devs, pos); } static void br2684_seq_stop(struct seq_file *seq, void *v) __releases(devs_lock) { read_unlock(&devs_lock); } static int br2684_seq_show(struct seq_file *seq, void *v) { const struct br2684_dev *brdev = list_entry(v, struct br2684_dev, br2684_devs); const struct net_device *net_dev = brdev->net_dev; const struct br2684_vcc *brvcc; seq_printf(seq, "dev %.16s: num=%d, mac=%pM (%s)\n", net_dev->name, brdev->number, net_dev->dev_addr, brdev->mac_was_set ? "set" : "auto"); list_for_each_entry(brvcc, &brdev->brvccs, brvccs) { seq_printf(seq, " vcc %d.%d.%d: encaps=%s payload=%s" ", failed copies %u/%u" "\n", brvcc->atmvcc->dev->number, brvcc->atmvcc->vpi, brvcc->atmvcc->vci, (brvcc->encaps == e_llc) ? "LLC" : "VC", (brdev->payload == p_bridged) ? 
"bridged" : "routed", brvcc->copies_failed, brvcc->copies_needed); #ifdef CONFIG_ATM_BR2684_IPFILTER #define b1(var, byte) ((u8 *) &brvcc->filter.var)[byte] #define bs(var) b1(var, 0), b1(var, 1), b1(var, 2), b1(var, 3) if (brvcc->filter.netmask != 0) seq_printf(seq, " filter=%d.%d.%d.%d/" "%d.%d.%d.%d\n", bs(prefix), bs(netmask)); #undef bs #undef b1 #endif /* CONFIG_ATM_BR2684_IPFILTER */ } return 0; } static const struct seq_operations br2684_seq_ops = { .start = br2684_seq_start, .next = br2684_seq_next, .stop = br2684_seq_stop, .show = br2684_seq_show, }; static int br2684_proc_open(struct inode *inode, struct file *file) { return seq_open(file, &br2684_seq_ops); } static const struct file_operations br2684_proc_ops = { .owner = THIS_MODULE, .open = br2684_proc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; extern struct proc_dir_entry *atm_proc_root; /* from proc.c */ #endif /* CONFIG_PROC_FS */ static int __init br2684_init(void) { #ifdef CONFIG_PROC_FS struct proc_dir_entry *p; p = proc_create("br2684", 0, atm_proc_root, &br2684_proc_ops); if (p == NULL) return -ENOMEM; #endif register_atm_ioctl(&br2684_ioctl_ops); return 0; } static void __exit br2684_exit(void) { struct net_device *net_dev; struct br2684_dev *brdev; struct br2684_vcc *brvcc; deregister_atm_ioctl(&br2684_ioctl_ops); #ifdef CONFIG_PROC_FS remove_proc_entry("br2684", atm_proc_root); #endif while (!list_empty(&br2684_devs)) { net_dev = list_entry_brdev(br2684_devs.next); brdev = BRPRIV(net_dev); while (!list_empty(&brdev->brvccs)) { brvcc = list_entry_brvcc(brdev->brvccs.next); br2684_close_vcc(brvcc); } list_del(&brdev->br2684_devs); unregister_netdev(net_dev); free_netdev(net_dev); } } module_init(br2684_init); module_exit(br2684_exit); MODULE_AUTHOR("Marcell GAL"); MODULE_DESCRIPTION("RFC2684 bridged protocols over ATM/AAL5"); MODULE_LICENSE("GPL");
gpl-2.0
daedalus81/linux-working
arch/frv/mb93090-mb00/pci-vdk.c
2042
12634
/* pci-vdk.c: MB93090-MB00 (VDK) PCI support * * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <asm/segment.h> #include <asm/io.h> #include <asm/mb-regs.h> #include <asm/mb86943a.h> #include "pci-frv.h" unsigned int __nongpreldata pci_probe = 1; int __nongpreldata pcibios_last_bus = -1; struct pci_ops *__nongpreldata pci_root_ops; /* * The accessible PCI window does not cover the entire CPU address space, but * there are devices we want to access outside of that window, so we need to * insert specific PCI bus resources instead of using the platform-level bus * resources directly for the PCI root bus. * * These are configured and inserted by pcibios_init() and are attached to the * root bus by pcibios_fixup_bus(). */ static struct resource pci_ioport_resource = { .name = "PCI IO", .start = 0, .end = IO_SPACE_LIMIT, .flags = IORESOURCE_IO, }; static struct resource pci_iomem_resource = { .name = "PCI mem", .start = 0, .end = -1, .flags = IORESOURCE_MEM, }; /* * Functions for accessing PCI configuration space */ #define CONFIG_CMD(bus, dev, where) \ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3)) #define __set_PciCfgAddr(A) writel((A), (volatile void __iomem *) __region_CS1 + 0x80) #define __get_PciCfgDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 3)) #define __get_PciCfgDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 2)) #define __get_PciCfgDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x88) #define __set_PciCfgDataB(A,V) \ writeb((V), (volatile void __iomem *) __region_CS1 + 0x88 + (3 - ((A) & 3))) #define __set_PciCfgDataW(A,V) \ writew((V), (volatile void __iomem *) __region_CS1 + 0x88 + (2 - ((A) & 2))) #define __set_PciCfgDataL(A,V) \ writel((V), (volatile void __iomem *) __region_CS1 + 0x88) #define __get_PciBridgeDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __get_PciBridgeDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __get_PciBridgeDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataB(A,V) writeb((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataW(A,V) writew((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) #define __set_PciBridgeDataL(A,V) writel((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A)) static inline int __query(const struct pci_dev *dev) { // return dev->bus->number==0 && (dev->devfn==PCI_DEVFN(0,0)); // return dev->bus->number==1; // return dev->bus->number==0 && // (dev->devfn==PCI_DEVFN(2,0) || dev->devfn==PCI_DEVFN(3,0)); return 0; } /*****************************************************************************/ /* * */ static int pci_frv_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u32 _value; if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { _value = __get_PciBridgeDataL(where & ~3); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); _value = __get_PciCfgDataL(where & ~3); } switch (size) { case 1: 
_value = _value >> ((where & 3) * 8); break; case 2: _value = _value >> ((where & 2) * 8); break; case 4: break; default: BUG(); } *val = _value; return PCIBIOS_SUCCESSFUL; } static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { switch (size) { case 1: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataB(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataB(where, value); } break; case 2: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataW(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataW(where, value); } break; case 4: if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) { __set_PciBridgeDataL(where, value); } else { __set_PciCfgAddr(CONFIG_CMD(bus, devfn, where)); __set_PciCfgDataL(where, value); } break; default: BUG(); } return PCIBIOS_SUCCESSFUL; } static struct pci_ops pci_direct_frv = { pci_frv_read_config, pci_frv_write_config, }; /* * Before we decide to use direct hardware access mechanisms, we try to do some * trivial checks to ensure it at least _seems_ to be working -- we just test * whether bus 00 contains a host bridge (this is similar to checking * techniques used in XFree86, but ours should be more reliable since we * attempt to make use of direct access hints provided by the PCI BIOS). * * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. */ static int __init pci_sanity_check(struct pci_ops *o) { struct pci_bus bus; /* Fake bus and device */ u32 id; bus.number = 0; if (o->read(&bus, 0, PCI_VENDOR_ID, 4, &id) == PCIBIOS_SUCCESSFUL) { printk("PCI: VDK Bridge device:vendor: %08x\n", id); if (id == 0x200e10cf) return 1; } printk("PCI: VDK Bridge: Sanity check failed\n"); return 0; } static struct pci_ops * __init pci_check_direct(void) { unsigned long flags; local_irq_save(flags); /* check if access works */ if (pci_sanity_check(&pci_direct_frv)) { local_irq_restore(flags); printk("PCI: Using configuration frv\n"); // request_mem_region(0xBE040000, 256, "FRV bridge"); // request_mem_region(0xBFFFFFF4, 12, "PCI frv"); return &pci_direct_frv; } local_irq_restore(flags); return NULL; } /* * Discover remaining PCI buses in case there are peer host bridges. * We use the number of last PCI bus provided by the PCI BIOS. */ static void __init pcibios_fixup_peer_bridges(void) { struct pci_bus bus; struct pci_dev dev; int n; u16 l; if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff) return; printk("PCI: Peer bridge fixup\n"); for (n=0; n <= pcibios_last_bus; n++) { if (pci_find_bus(0, n)) continue; bus.number = n; bus.ops = pci_root_ops; dev.bus = &bus; for(dev.devfn=0; dev.devfn<256; dev.devfn += 8) if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) && l != 0x0000 && l != 0xffff) { printk("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l); printk("PCI: Discovered peer bus %02x\n", n); pci_scan_bus(n, pci_root_ops, NULL); break; } } } /* * Exceptions for specific devices. Usually work-arounds for fatal design flaws. */ static void __init pci_fixup_umc_ide(struct pci_dev *d) { /* * UM8886BF IDE controller sets region type bits incorrectly, * therefore they look like memory despite of them being I/O. 
*/ int i; printk("PCI: Fixing base address flags for device %s\n", pci_name(d)); for(i=0; i<4; i++) d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO; } static void pci_fixup_ide_bases(struct pci_dev *d) { int i; /* * PCI IDE controllers use non-standard I/O port decoding, respect it. */ if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE) return; printk("PCI: IDE base address fixup for %s\n", pci_name(d)); for(i=0; i<4; i++) { struct resource *r = &d->resource[i]; if ((r->start & ~0x80) == 0x374) { r->start |= 2; r->end = r->start; } } } static void pci_fixup_ide_trash(struct pci_dev *d) { int i; /* * There exist PCI IDE controllers which have utter garbage * in first four base registers. Ignore that. */ printk("PCI: IDE base address trash cleared for %s\n", pci_name(d)); for(i=0; i<4; i++) d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0; } static void pci_fixup_latency(struct pci_dev *d) { /* * SiS 5597 and 5598 chipsets require latency timer set to * at most 32 to avoid lockups. */ DBG("PCI: Setting max latency to 32\n"); pcibios_max_latency = 32; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency); DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); /* * Called after each bus is probed, but before its children * are examined. */ void __init pcibios_fixup_bus(struct pci_bus *bus) { #if 0 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); #endif pci_read_bridge_bases(bus); if (bus->number == 0) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->devfn == 0) { dev->resource[0].start = 0; dev->resource[0].end = 0; } } } } /* * Initialization. Try all known PCI access methods. Note that we support * using both PCI BIOS and direct access: in such cases, we use I/O ports * to access config space, but we still keep BIOS order of cards to be * compatible with 2.0.X. This should go away some day. 
*/ int __init pcibios_init(void) { struct pci_ops *dir = NULL; LIST_HEAD(resources); if (!mb93090_mb00_detected) return -ENXIO; __reg_MB86943_sl_ctl |= MB86943_SL_CTL_DRCT_MASTER_SWAP | MB86943_SL_CTL_DRCT_SLAVE_SWAP; __reg_MB86943_ecs_base(1) = ((__region_CS2 + 0x01000000) >> 9) | 0x08000000; __reg_MB86943_ecs_base(2) = ((__region_CS2 + 0x00000000) >> 9) | 0x08000000; *(volatile uint32_t *) (__region_CS1 + 0x848) = 0xe0000000; *(volatile uint32_t *) (__region_CS1 + 0x8b8) = 0x00000000; __reg_MB86943_sl_pci_io_base = (__region_CS2 + 0x04000000) >> 9; __reg_MB86943_sl_pci_mem_base = (__region_CS2 + 0x08000000) >> 9; __reg_MB86943_pci_sl_io_base = __region_CS2 + 0x04000000; __reg_MB86943_pci_sl_mem_base = __region_CS2 + 0x08000000; mb(); /* enable PCI arbitration */ __reg_MB86943_pci_arbiter = MB86943_PCIARB_EN; pci_ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00; pci_ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff; pci_ioport_resource.end += pci_ioport_resource.start; printk("PCI IO window: %08llx-%08llx\n", (unsigned long long) pci_ioport_resource.start, (unsigned long long) pci_ioport_resource.end); pci_iomem_resource.start = (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00; pci_iomem_resource.end = (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff; pci_iomem_resource.end += pci_iomem_resource.start; /* Reserve somewhere to write to flush posted writes. This is used by * __flush_PCI_writes() from asm/io.h to force the write FIFO in the * CPU-PCI bridge to flush as this doesn't happen automatically when a * read is performed on the MB93090 development kit motherboard. */ pci_iomem_resource.start += 0x400; printk("PCI MEM window: %08llx-%08llx\n", (unsigned long long) pci_iomem_resource.start, (unsigned long long) pci_iomem_resource.end); printk("PCI DMA memory: %08lx-%08lx\n", dma_coherent_mem_start, dma_coherent_mem_end); if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0) panic("Unable to insert PCI IOMEM resource\n"); if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0) panic("Unable to insert PCI IOPORT resource\n"); if (!pci_probe) return -ENXIO; dir = pci_check_direct(); if (dir) pci_root_ops = dir; else { printk("PCI: No PCI bus detected\n"); return -ENXIO; } printk("PCI: Probing PCI hardware\n"); pci_add_resource(&resources, &pci_ioport_resource); pci_add_resource(&resources, &pci_iomem_resource); pci_scan_root_bus(NULL, 0, pci_root_ops, NULL, &resources); pcibios_irq_init(); pcibios_fixup_peer_bridges(); pcibios_fixup_irqs(); pcibios_resource_survey(); return 0; } arch_initcall(pcibios_init); char * __init pcibios_setup(char *str) { if (!strcmp(str, "off")) { pci_probe = 0; return NULL; } else if (!strncmp(str, "lastbus=", 8)) { pcibios_last_bus = simple_strtol(str+8, NULL, 0); return NULL; } return str; } int pcibios_enable_device(struct pci_dev *dev, int mask) { int err; if ((err = pci_enable_resources(dev, mask)) < 0) return err; if (!dev->msi_enabled) pcibios_enable_irq(dev); return 0; }
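/* Editorial sketch, not part of the FRV source above: a standalone
 * userspace illustration of how CONFIG_CMD() packs a type-1 PCI
 * configuration address (enable bit, bus number, devfn, dword-aligned
 * register offset). The PCI_DEVFN() packing is restated here for
 * completeness; config_cmd() and the sample bus/slot numbers are
 * hypothetical. */
#include <stdio.h>
#include <stdint.h>

/* Userspace restatement of the kernel's slot/function packing. */
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

/* Mirrors CONFIG_CMD(bus, dev, where) from pci-vdk.c above. */
static uint32_t config_cmd(unsigned int bus, unsigned int devfn,
			   unsigned int where)
{
	return 0x80000000u | (bus << 16) | (devfn << 8) | (where & ~3u);
}

int main(void)
{
	/* Bus 0, slot 2, function 0, vendor ID register (offset 0x00). */
	printf("cmd = 0x%08x\n", config_cmd(0, PCI_DEVFN(2, 0), 0x00));
	/* Bus 1, slot 3, function 1, first BAR (offset 0x10). */
	printf("cmd = 0x%08x\n", config_cmd(1, PCI_DEVFN(3, 1), 0x10));
	return 0;
}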
gpl-2.0
Explosion-y6/android_kernel_huawei_msm8909
fs/udf/namei.c
2042
34208
/* * namei.c * * PURPOSE * Inode name handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 12/12/98 blf Created. Split out the lookup code from dir.c * 04/19/99 blf link, mknod, symlink support */ #include "udfdecl.h" #include "udf_i.h" #include "udf_sb.h" #include <linux/string.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/sched.h> #include <linux/crc-itu-t.h> #include <linux/exportfs.h> static inline int udf_match(int len1, const unsigned char *name1, int len2, const unsigned char *name2) { if (len1 != len2) return 0; return !memcmp(name1, name2, len1); } int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh, uint8_t *impuse, uint8_t *fileident) { uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(struct tag); uint16_t crc; int offset; uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse); uint8_t lfi = cfi->lengthFileIdent; int padlen = fibh->eoffset - fibh->soffset - liu - lfi - sizeof(struct fileIdentDesc); int adinicb = 0; if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) adinicb = 1; offset = fibh->soffset + sizeof(struct fileIdentDesc); if (impuse) { if (adinicb || (offset + liu < 0)) { memcpy((uint8_t *)sfi->impUse, impuse, liu); } else if (offset >= 0) { memcpy(fibh->ebh->b_data + offset, impuse, liu); } else { memcpy((uint8_t *)sfi->impUse, impuse, -offset); memcpy(fibh->ebh->b_data, impuse - offset, liu + offset); } } offset += liu; if (fileident) { if (adinicb || (offset + lfi < 0)) { memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi); } else if (offset >= 0) { memcpy(fibh->ebh->b_data + offset, fileident, lfi); } else { memcpy((uint8_t *)sfi->fileIdent + liu, fileident, -offset); memcpy(fibh->ebh->b_data, fileident - offset, lfi + offset); } } offset += lfi; if (adinicb || (offset + padlen < 0)) { memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen); } else if (offset >= 0) { memset(fibh->ebh->b_data + offset, 0x00, padlen); } else { memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset); memset(fibh->ebh->b_data, 0x00, padlen + offset); } crc = crc_itu_t(0, (uint8_t *)cfi + sizeof(struct tag), sizeof(struct fileIdentDesc) - sizeof(struct tag)); if (fibh->sbh == fibh->ebh) { crc = crc_itu_t(crc, (uint8_t *)sfi->impUse, crclen + sizeof(struct tag) - sizeof(struct fileIdentDesc)); } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) { crc = crc_itu_t(crc, fibh->ebh->b_data + sizeof(struct fileIdentDesc) + fibh->soffset, crclen + sizeof(struct tag) - sizeof(struct fileIdentDesc)); } else { crc = crc_itu_t(crc, (uint8_t *)sfi->impUse, -fibh->soffset - sizeof(struct fileIdentDesc)); crc = crc_itu_t(crc, fibh->ebh->b_data, fibh->eoffset); } cfi->descTag.descCRC = cpu_to_le16(crc); cfi->descTag.descCRCLength = cpu_to_le16(crclen); cfi->descTag.tagChecksum = udf_tag_checksum(&cfi->descTag); if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) { memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc)); } else { memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset); memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset, sizeof(struct fileIdentDesc) + fibh->soffset); } if 
(adinicb) { mark_inode_dirty(inode); } else { if (fibh->sbh != fibh->ebh) mark_buffer_dirty_inode(fibh->ebh, inode); mark_buffer_dirty_inode(fibh->sbh, inode); } return 0; } static struct fileIdentDesc *udf_find_entry(struct inode *dir, const struct qstr *child, struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi) { struct fileIdentDesc *fi = NULL; loff_t f_pos; int block, flen; unsigned char *fname = NULL; unsigned char *nameptr; uint8_t lfi; uint16_t liu; loff_t size; struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo = UDF_I(dir); int isdotdot = child->len == 2 && child->name[0] == '.' && child->name[1] == '.'; size = udf_ext0_offset(dir) + dir->i_size; f_pos = udf_ext0_offset(dir); fibh->sbh = fibh->ebh = NULL; fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) goto out_err; block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); } else offset = 0; fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); if (!fibh->sbh) goto out_err; } fname = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!fname) goto out_err; while (f_pos < size) { fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset); if (!fi) goto out_err; liu = le16_to_cpu(cfi->lengthOfImpUse); lfi = cfi->lengthFileIdent; if (fibh->sbh == fibh->ebh) { nameptr = fi->fileIdent + liu; } else { int poffset; /* Unpaded ending offset */ poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi; if (poffset >= lfi) nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi); else { nameptr = fname; memcpy(nameptr, fi->fileIdent + liu, lfi - poffset); memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset); } } if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE)) continue; } if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) { if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE)) continue; } if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) && isdotdot) goto out_ok; if (!lfi) continue; flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); if (flen && udf_match(flen, fname, child->len, child->name)) goto out_ok; } out_err: fi = NULL; if (fibh->sbh != fibh->ebh) brelse(fibh->ebh); brelse(fibh->sbh); out_ok: brelse(epos.bh); kfree(fname); return fi; } static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode = NULL; struct fileIdentDesc cfi; struct udf_fileident_bh fibh; if (dentry->d_name.len > UDF_NAME_LEN - 2) return ERR_PTR(-ENAMETOOLONG); #ifdef UDF_RECOVERY /* temporary shorthand for specifying files by inode number */ if (!strncmp(dentry->d_name.name, ".B=", 3)) { struct kernel_lb_addr lb = { .logicalBlockNum = 0, .partitionReferenceNum = simple_strtoul(dentry->d_name.name + 3, NULL, 0), }; inode = udf_iget(dir->i_sb, lb); if (!inode) { return ERR_PTR(-EACCES); } } else #endif /* UDF_RECOVERY */ if (udf_find_entry(dir, &dentry->d_name, &fibh, &cfi)) { struct kernel_lb_addr loc; if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); loc = lelb_to_cpu(cfi.icb.extLocation); inode = 
udf_iget(dir->i_sb, &loc); if (!inode) { return ERR_PTR(-EACCES); } } return d_splice_alias(inode, dentry); } static struct fileIdentDesc *udf_add_entry(struct inode *dir, struct dentry *dentry, struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi, int *err) { struct super_block *sb = dir->i_sb; struct fileIdentDesc *fi = NULL; unsigned char *name = NULL; int namelen; loff_t f_pos; loff_t size = udf_ext0_offset(dir) + dir->i_size; int nfidlen; uint8_t lfi; uint16_t liu; int block; struct kernel_lb_addr eloc; uint32_t elen = 0; sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo; fibh->sbh = fibh->ebh = NULL; name = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!name) { *err = -ENOMEM; goto out_err; } if (dentry) { if (!dentry->d_name.len) { *err = -EINVAL; goto out_err; } namelen = udf_put_filename(sb, dentry->d_name.name, name, dentry->d_name.len); if (!namelen) { *err = -ENAMETOOLONG; goto out_err; } } else { namelen = 0; } nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3; f_pos = udf_ext0_offset(dir); fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); dinfo = UDF_I(dir); if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) { block = udf_get_lb_pblock(dir->i_sb, &dinfo->i_location, 0); fibh->soffset = fibh->eoffset = sb->s_blocksize; goto add; } block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); } else offset = 0; fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); if (!fibh->sbh) { *err = -EIO; goto out_err; } block = dinfo->i_location.logicalBlockNum; } while (f_pos < size) { fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset); if (!fi) { *err = -EIO; goto out_err; } liu = le16_to_cpu(cfi->lengthOfImpUse); lfi = cfi->lengthFileIdent; if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen) { cfi->descTag.tagSerialNum = cpu_to_le16(1); cfi->fileVersionNum = cpu_to_le16(1); cfi->fileCharacteristics = 0; cfi->lengthFileIdent = namelen; cfi->lengthOfImpUse = cpu_to_le16(0); if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) goto out_ok; else { *err = -EIO; goto out_err; } } } } add: f_pos += nfidlen; if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && sb->s_blocksize - fibh->eoffset < nfidlen) { brelse(epos.bh); epos.bh = NULL; fibh->soffset -= udf_ext0_offset(dir); fibh->eoffset -= udf_ext0_offset(dir); f_pos -= udf_ext0_offset(dir); if (fibh->sbh != fibh->ebh) brelse(fibh->ebh); brelse(fibh->sbh); fibh->sbh = fibh->ebh = udf_expand_dir_adinicb(dir, &block, err); if (!fibh->sbh) goto out_err; epos.block = dinfo->i_location; epos.offset = udf_file_entry_alloc_offset(dir); /* Load extent udf_expand_dir_adinicb() has created */ udf_current_aext(dir, &epos, &eloc, &elen, 1); } /* Entry fits into current block? 
*/ if (sb->s_blocksize - fibh->eoffset >= nfidlen) { fibh->soffset = fibh->eoffset; fibh->eoffset += nfidlen; if (fibh->sbh != fibh->ebh) { brelse(fibh->sbh); fibh->sbh = fibh->ebh; } if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { block = dinfo->i_location.logicalBlockNum; fi = (struct fileIdentDesc *) (dinfo->i_ext.i_data + fibh->soffset - udf_ext0_offset(dir) + dinfo->i_lenEAttr); } else { block = eloc.logicalBlockNum + ((elen - 1) >> dir->i_sb->s_blocksize_bits); fi = (struct fileIdentDesc *) (fibh->sbh->b_data + fibh->soffset); } } else { /* Round up last extent in the file */ elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); udf_write_aext(dir, &epos, &eloc, elen, 1); dinfo->i_lenExtents = (dinfo->i_lenExtents + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); fibh->soffset = fibh->eoffset - sb->s_blocksize; fibh->eoffset += nfidlen - sb->s_blocksize; if (fibh->sbh != fibh->ebh) { brelse(fibh->sbh); fibh->sbh = fibh->ebh; } block = eloc.logicalBlockNum + ((elen - 1) >> dir->i_sb->s_blocksize_bits); fibh->ebh = udf_bread(dir, f_pos >> dir->i_sb->s_blocksize_bits, 1, err); if (!fibh->ebh) goto out_err; /* Extents could have been merged, invalidate our position */ brelse(epos.bh); epos.bh = NULL; epos.block = dinfo->i_location; epos.offset = udf_file_entry_alloc_offset(dir); if (!fibh->soffset) { /* Find the freshly allocated block */ while (udf_next_aext(dir, &epos, &eloc, &elen, 1) == (EXT_RECORDED_ALLOCATED >> 30)) ; block = eloc.logicalBlockNum + ((elen - 1) >> dir->i_sb->s_blocksize_bits); brelse(fibh->sbh); fibh->sbh = fibh->ebh; fi = (struct fileIdentDesc *)(fibh->sbh->b_data); } else { fi = (struct fileIdentDesc *) (fibh->sbh->b_data + sb->s_blocksize + fibh->soffset); } } memset(cfi, 0, sizeof(struct fileIdentDesc)); if (UDF_SB(sb)->s_udfrev >= 0x0200) udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, sizeof(struct tag)); else udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, sizeof(struct tag)); cfi->fileVersionNum = cpu_to_le16(1); cfi->lengthFileIdent = namelen; cfi->lengthOfImpUse = cpu_to_le16(0); if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) { dir->i_size += nfidlen; if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) dinfo->i_lenAlloc += nfidlen; else { /* Find the last extent and truncate it to proper size */ while (udf_next_aext(dir, &epos, &eloc, &elen, 1) == (EXT_RECORDED_ALLOCATED >> 30)) ; elen -= dinfo->i_lenExtents - dir->i_size; if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); udf_write_aext(dir, &epos, &eloc, elen, 1); dinfo->i_lenExtents = dir->i_size; } mark_inode_dirty(dir); goto out_ok; } else { *err = -EIO; goto out_err; } out_err: fi = NULL; if (fibh->sbh != fibh->ebh) brelse(fibh->ebh); brelse(fibh->sbh); out_ok: brelse(epos.bh); kfree(name); return fi; } static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi, struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi) { cfi->fileCharacteristics |= FID_FILE_CHAR_DELETED; if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) memset(&(cfi->icb), 0x00, sizeof(struct long_ad)); return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL); } static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct udf_fileident_bh fibh; struct inode 
*inode; struct fileIdentDesc cfi, *fi; int err; struct udf_inode_info *iinfo; inode = udf_new_inode(dir, mode, &err); if (!inode) { return err; } iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) inode->i_data.a_ops = &udf_adinicb_aops; else inode->i_data.a_ops = &udf_aops; inode->i_op = &udf_file_inode_operations; inode->i_fop = &udf_file_operations; mark_inode_dirty(inode); fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { inode_dec_link_count(inode); iput(inode); return err; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(dir); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); d_instantiate(dentry, inode); return 0; } static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode *inode; struct udf_fileident_bh fibh; struct fileIdentDesc cfi, *fi; int err; struct udf_inode_info *iinfo; if (!old_valid_dev(rdev)) return -EINVAL; err = -EIO; inode = udf_new_inode(dir, mode, &err); if (!inode) goto out; iinfo = UDF_I(inode); init_special_inode(inode, mode, rdev); fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { inode_dec_link_count(inode); iput(inode); return err; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(dir); mark_inode_dirty(inode); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); d_instantiate(dentry, inode); err = 0; out: return err; } static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; struct udf_fileident_bh fibh; struct fileIdentDesc cfi, *fi; int err; struct udf_inode_info *dinfo = UDF_I(dir); struct udf_inode_info *iinfo; err = -EIO; inode = udf_new_inode(dir, S_IFDIR | mode, &err); if (!inode) goto out; iinfo = UDF_I(inode); inode->i_op = &udf_dir_inode_operations; inode->i_fop = &udf_dir_operations; fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err); if (!fi) { inode_dec_link_count(inode); iput(inode); goto out; } set_nlink(inode, 2); cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(dinfo->i_location); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(dinfo->i_unique & 0x00000000FFFFFFFFUL); cfi.fileCharacteristics = FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT; udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL); brelse(fibh.sbh); mark_inode_dirty(inode); fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { clear_nlink(inode); mark_inode_dirty(inode); iput(inode); goto out; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY; udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); inc_nlink(dir); mark_inode_dirty(dir); d_instantiate(dentry, inode); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); err = 0; 
out: return err; } static int empty_dir(struct inode *dir) { struct fileIdentDesc *fi, cfi; struct udf_fileident_bh fibh; loff_t f_pos; loff_t size = udf_ext0_offset(dir) + dir->i_size; int block; struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo = UDF_I(dir); f_pos = udf_ext0_offset(dir); fibh.soffset = fibh.eoffset = f_pos & (dir->i_sb->s_blocksize - 1); if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) fibh.sbh = fibh.ebh = NULL; else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); } else offset = 0; fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block); if (!fibh.sbh) { brelse(epos.bh); return 0; } } else { brelse(epos.bh); return 0; } while (f_pos < size) { fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc, &elen, &offset); if (!fi) { if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); brelse(epos.bh); return 0; } if (cfi.lengthFileIdent && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0) { if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); brelse(epos.bh); return 0; } } if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); brelse(epos.bh); return 1; } static int udf_rmdir(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode = dentry->d_inode; struct udf_fileident_bh fibh; struct fileIdentDesc *fi, cfi; struct kernel_lb_addr tloc; retval = -ENOENT; fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); if (!fi) goto out; retval = -EIO; tloc = lelb_to_cpu(cfi.icb.extLocation); if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino) goto end_rmdir; retval = -ENOTEMPTY; if (!empty_dir(inode)) goto end_rmdir; retval = udf_delete_entry(dir, fi, &fibh, &cfi); if (retval) goto end_rmdir; if (inode->i_nlink != 2) udf_warn(inode->i_sb, "empty directory has nlink != 2 (%d)\n", inode->i_nlink); clear_nlink(inode); inode->i_size = 0; inode_dec_link_count(dir); inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); mark_inode_dirty(dir); end_rmdir: if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); out: return retval; } static int udf_unlink(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode = dentry->d_inode; struct udf_fileident_bh fibh; struct fileIdentDesc *fi; struct fileIdentDesc cfi; struct kernel_lb_addr tloc; retval = -ENOENT; fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); if (!fi) goto out; retval = -EIO; tloc = lelb_to_cpu(cfi.icb.extLocation); if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino) goto end_unlink; if (!inode->i_nlink) { udf_debug("Deleting nonexistent file (%lu), %d\n", inode->i_ino, inode->i_nlink); set_nlink(inode, 1); } retval = udf_delete_entry(dir, fi, &fibh, &cfi); if (retval) goto end_unlink; dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); mark_inode_dirty(dir); inode_dec_link_count(inode); inode->i_ctime = dir->i_ctime; retval = 0; end_unlink: if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); out: return retval; } static int udf_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *inode; struct pathComponent *pc; const char 
*compstart; struct udf_fileident_bh fibh; struct extent_position epos = {}; int eoffset, elen = 0; struct fileIdentDesc *fi; struct fileIdentDesc cfi; uint8_t *ea; int err; int block; unsigned char *name = NULL; int namelen; struct udf_inode_info *iinfo; struct super_block *sb = dir->i_sb; inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err); if (!inode) goto out; iinfo = UDF_I(inode); down_write(&iinfo->i_data_sem); name = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!name) { err = -ENOMEM; goto out_no_entry; } inode->i_data.a_ops = &udf_symlink_aops; inode->i_op = &udf_symlink_inode_operations; if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { struct kernel_lb_addr eloc; uint32_t bsize; block = udf_new_block(sb, inode, iinfo->i_location.partitionReferenceNum, iinfo->i_location.logicalBlockNum, &err); if (!block) goto out_no_entry; epos.block = iinfo->i_location; epos.offset = udf_file_entry_alloc_offset(inode); epos.bh = NULL; eloc.logicalBlockNum = block; eloc.partitionReferenceNum = iinfo->i_location.partitionReferenceNum; bsize = sb->s_blocksize; iinfo->i_lenExtents = bsize; udf_add_aext(inode, &epos, &eloc, bsize, 0); brelse(epos.bh); block = udf_get_pblock(sb, block, iinfo->i_location.partitionReferenceNum, 0); epos.bh = udf_tgetblk(sb, block); lock_buffer(epos.bh); memset(epos.bh->b_data, 0x00, bsize); set_buffer_uptodate(epos.bh); unlock_buffer(epos.bh); mark_buffer_dirty_inode(epos.bh, inode); ea = epos.bh->b_data + udf_ext0_offset(inode); } else ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr; eoffset = sb->s_blocksize - udf_ext0_offset(inode); pc = (struct pathComponent *)ea; if (*symname == '/') { do { symname++; } while (*symname == '/'); pc->componentType = 1; pc->lengthComponentIdent = 0; pc->componentFileVersionNum = 0; elen += sizeof(struct pathComponent); } err = -ENAMETOOLONG; while (*symname) { if (elen + sizeof(struct pathComponent) > eoffset) goto out_no_entry; pc = (struct pathComponent *)(ea + elen); compstart = symname; do { symname++; } while (*symname && *symname != '/'); pc->componentType = 5; pc->lengthComponentIdent = 0; pc->componentFileVersionNum = 0; if (compstart[0] == '.') { if ((symname - compstart) == 1) pc->componentType = 4; else if ((symname - compstart) == 2 && compstart[1] == '.') pc->componentType = 3; } if (pc->componentType == 5) { namelen = udf_put_filename(sb, compstart, name, symname - compstart); if (!namelen) goto out_no_entry; if (elen + sizeof(struct pathComponent) + namelen > eoffset) goto out_no_entry; else pc->lengthComponentIdent = namelen; memcpy(pc->componentIdent, name, namelen); } elen += sizeof(struct pathComponent) + pc->lengthComponentIdent; if (*symname) { do { symname++; } while (*symname == '/'); } } brelse(epos.bh); inode->i_size = elen; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) iinfo->i_lenAlloc = inode->i_size; else udf_truncate_tail_extent(inode); mark_inode_dirty(inode); fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) goto out_no_entry; cfi.icb.extLength = cpu_to_le32(sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); if (UDF_SB(inode->i_sb)->s_lvid_bh) { *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(lvid_get_unique_id(sb)); } udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(dir); up_write(&iinfo->i_data_sem); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); d_instantiate(dentry, inode); err = 0; out: kfree(name); return err; out_no_entry: up_write(&iinfo->i_data_sem); 
inode_dec_link_count(inode); iput(inode); goto out; } static int udf_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; struct udf_fileident_bh fibh; struct fileIdentDesc cfi, *fi; int err; fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { return err; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location); if (UDF_SB(inode->i_sb)->s_lvid_bh) { *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(lvid_get_unique_id(inode->i_sb)); } udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(dir); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); inc_nlink(inode); inode->i_ctime = current_fs_time(inode->i_sb); mark_inode_dirty(inode); ihold(inode); d_instantiate(dentry, inode); return 0; } /* Anybody can rename anything with this: the permission checks are left to the * higher-level routines. */ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; struct udf_fileident_bh ofibh, nfibh; struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL; struct fileIdentDesc ocfi, ncfi; struct buffer_head *dir_bh = NULL; int retval = -ENOENT; struct kernel_lb_addr tloc; struct udf_inode_info *old_iinfo = UDF_I(old_inode); ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); if (ofi) { if (ofibh.sbh != ofibh.ebh) brelse(ofibh.ebh); brelse(ofibh.sbh); } tloc = lelb_to_cpu(ocfi.icb.extLocation); if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino) goto end_rename; nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi); if (nfi) { if (!new_inode) { if (nfibh.sbh != nfibh.ebh) brelse(nfibh.ebh); brelse(nfibh.sbh); nfi = NULL; } } if (S_ISDIR(old_inode->i_mode)) { int offset = udf_ext0_offset(old_inode); if (new_inode) { retval = -ENOTEMPTY; if (!empty_dir(new_inode)) goto end_rename; } retval = -EIO; if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { dir_fi = udf_get_fileident( old_iinfo->i_ext.i_data - (old_iinfo->i_efe ? sizeof(struct extendedFileEntry) : sizeof(struct fileEntry)), old_inode->i_sb->s_blocksize, &offset); } else { dir_bh = udf_bread(old_inode, 0, 0, &retval); if (!dir_bh) goto end_rename; dir_fi = udf_get_fileident(dir_bh->b_data, old_inode->i_sb->s_blocksize, &offset); } if (!dir_fi) goto end_rename; tloc = lelb_to_cpu(dir_fi->icb.extLocation); if (udf_get_lb_pblock(old_inode->i_sb, &tloc, 0) != old_dir->i_ino) goto end_rename; } if (!nfi) { nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval); if (!nfi) goto end_rename; } /* * Like most other Unix systems, set the ctime for inodes on a * rename. 
*/ old_inode->i_ctime = current_fs_time(old_inode->i_sb); mark_inode_dirty(old_inode); /* * ok, that's it */ ncfi.fileVersionNum = ocfi.fileVersionNum; ncfi.fileCharacteristics = ocfi.fileCharacteristics; memcpy(&(ncfi.icb), &(ocfi.icb), sizeof(struct long_ad)); udf_write_fi(new_dir, &ncfi, nfi, &nfibh, NULL, NULL); /* The old fid may have moved - find it again */ ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); udf_delete_entry(old_dir, ofi, &ofibh, &ocfi); if (new_inode) { new_inode->i_ctime = current_fs_time(new_inode->i_sb); inode_dec_link_count(new_inode); } old_dir->i_ctime = old_dir->i_mtime = current_fs_time(old_dir->i_sb); mark_inode_dirty(old_dir); if (dir_fi) { dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location); udf_update_tag((char *)dir_fi, (sizeof(struct fileIdentDesc) + le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3); if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(old_inode); else mark_buffer_dirty_inode(dir_bh, old_inode); inode_dec_link_count(old_dir); if (new_inode) inode_dec_link_count(new_inode); else { inc_nlink(new_dir); mark_inode_dirty(new_dir); } } if (ofi) { if (ofibh.sbh != ofibh.ebh) brelse(ofibh.ebh); brelse(ofibh.sbh); } retval = 0; end_rename: brelse(dir_bh); if (nfi) { if (nfibh.sbh != nfibh.ebh) brelse(nfibh.ebh); brelse(nfibh.sbh); } return retval; } static struct dentry *udf_get_parent(struct dentry *child) { struct kernel_lb_addr tloc; struct inode *inode = NULL; struct qstr dotdot = QSTR_INIT("..", 2); struct fileIdentDesc cfi; struct udf_fileident_bh fibh; if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi)) goto out_unlock; if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); tloc = lelb_to_cpu(cfi.icb.extLocation); inode = udf_iget(child->d_inode->i_sb, &tloc); if (!inode) goto out_unlock; return d_obtain_alias(inode); out_unlock: return ERR_PTR(-EACCES); } static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block, u16 partref, __u32 generation) { struct inode *inode; struct kernel_lb_addr loc; if (block == 0) return ERR_PTR(-ESTALE); loc.logicalBlockNum = block; loc.partitionReferenceNum = partref; inode = udf_iget(sb, &loc); if (inode == NULL) return ERR_PTR(-ENOMEM); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return d_obtain_alias(inode); } static struct dentry *udf_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { if ((fh_len != 3 && fh_len != 5) || (fh_type != FILEID_UDF_WITH_PARENT && fh_type != FILEID_UDF_WITHOUT_PARENT)) return NULL; return udf_nfs_get_inode(sb, fid->udf.block, fid->udf.partref, fid->udf.generation); } static struct dentry *udf_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { if (fh_len != 5 || fh_type != FILEID_UDF_WITH_PARENT) return NULL; return udf_nfs_get_inode(sb, fid->udf.parent_block, fid->udf.parent_partref, fid->udf.parent_generation); } static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent) { int len = *lenp; struct kernel_lb_addr location = UDF_I(inode)->i_location; struct fid *fid = (struct fid *)fh; int type = FILEID_UDF_WITHOUT_PARENT; if (parent && (len < 5)) { *lenp = 5; return FILEID_INVALID; } else if (len < 3) { *lenp = 3; return FILEID_INVALID; } *lenp = 3; fid->udf.block = location.logicalBlockNum; fid->udf.partref = location.partitionReferenceNum; fid->udf.parent_partref = 0; fid->udf.generation = inode->i_generation; if (parent) { location = UDF_I(parent)->i_location; 
fid->udf.parent_block = location.logicalBlockNum; fid->udf.parent_partref = location.partitionReferenceNum; fid->udf.parent_generation = inode->i_generation; *lenp = 5; type = FILEID_UDF_WITH_PARENT; } return type; } const struct export_operations udf_export_ops = { .encode_fh = udf_encode_fh, .fh_to_dentry = udf_fh_to_dentry, .fh_to_parent = udf_fh_to_parent, .get_parent = udf_get_parent, }; const struct inode_operations udf_dir_inode_operations = { .lookup = udf_lookup, .create = udf_create, .link = udf_link, .unlink = udf_unlink, .symlink = udf_symlink, .mkdir = udf_mkdir, .rmdir = udf_rmdir, .mknod = udf_mknod, .rename = udf_rename, }; const struct inode_operations udf_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, };
gpl-2.0
yandex/smart
drivers/staging/bcm/led_control.c
2298
26808
#include "headers.h" #define STATUS_IMAGE_CHECKSUM_MISMATCH -199 #define EVENT_SIGNALED 1 static B_UINT16 CFG_CalculateChecksum(B_UINT8 *pu8Buffer, B_UINT32 u32Size) { B_UINT16 u16CheckSum = 0; while (u32Size--) { u16CheckSum += (B_UINT8)~(*pu8Buffer); pu8Buffer++; } return u16CheckSum; } BOOLEAN IsReqGpioIsLedInNVM(struct bcm_mini_adapter *Adapter, UINT gpios) { INT Status; Status = (Adapter->gpioBitMap & gpios) ^ gpios; if (Status) return FALSE; else return TRUE; } static INT LED_Blink(struct bcm_mini_adapter *Adapter, UINT GPIO_Num, UCHAR uiLedIndex, ULONG timeout, INT num_of_time, enum bcm_led_events currdriverstate) { int Status = STATUS_SUCCESS; BOOLEAN bInfinite = FALSE; /* Check if num_of_time is -ve. If yes, blink led in infinite loop */ if (num_of_time < 0) { bInfinite = TRUE; num_of_time = 1; } while (num_of_time) { if (currdriverstate == Adapter->DriverState) TURN_ON_LED(GPIO_Num, uiLedIndex); /* Wait for timeout after setting on the LED */ Status = wait_event_interruptible_timeout( Adapter->LEDInfo.notify_led_event, currdriverstate != Adapter->DriverState || kthread_should_stop(), msecs_to_jiffies(timeout)); if (kthread_should_stop()) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting"); Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED; TURN_OFF_LED(GPIO_Num, uiLedIndex); Status = EVENT_SIGNALED; break; } if (Status) { TURN_OFF_LED(GPIO_Num, uiLedIndex); Status = EVENT_SIGNALED; break; } TURN_OFF_LED(GPIO_Num, uiLedIndex); Status = wait_event_interruptible_timeout( Adapter->LEDInfo.notify_led_event, currdriverstate != Adapter->DriverState || kthread_should_stop(), msecs_to_jiffies(timeout)); if (bInfinite == FALSE) num_of_time--; } return Status; } static INT ScaleRateofTransfer(ULONG rate) { if (rate <= 3) return rate; else if ((rate > 3) && (rate <= 100)) return 5; else if ((rate > 100) && (rate <= 200)) return 6; else if ((rate > 200) && (rate <= 300)) return 7; else if ((rate > 300) && (rate <= 400)) return 8; else if ((rate > 400) && (rate <= 500)) return 9; else if ((rate > 500) && (rate <= 600)) return 10; else return MAX_NUM_OF_BLINKS; } static INT LED_Proportional_Blink(struct bcm_mini_adapter *Adapter, UCHAR GPIO_Num_tx, UCHAR uiTxLedIndex, UCHAR GPIO_Num_rx, UCHAR uiRxLedIndex, enum bcm_led_events currdriverstate) { /* Initial values of TX and RX packets */ ULONG64 Initial_num_of_packts_tx = 0, Initial_num_of_packts_rx = 0; /* values of TX and RX packets after 1 sec */ ULONG64 Final_num_of_packts_tx = 0, Final_num_of_packts_rx = 0; /* Rate of transfer of Tx and Rx in 1 sec */ ULONG64 rate_of_transfer_tx = 0, rate_of_transfer_rx = 0; int Status = STATUS_SUCCESS; INT num_of_time = 0, num_of_time_tx = 0, num_of_time_rx = 0; UINT remDelay = 0; BOOLEAN bBlinkBothLED = TRUE; /* UINT GPIO_num = DISABLE_GPIO_NUM; */ ulong timeout = 0; /* Read initial value of packets sent/received */ Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets; Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets; /* Scale the rate of transfer to no of blinks. */ num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx); num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx); while ((Adapter->device_removed == FALSE)) { timeout = 50; /* * Blink Tx and Rx LED when both Tx and Rx is * in normal bandwidth */ if (bBlinkBothLED) { /* * Assign minimum number of blinks of * either Tx or Rx. 
*/ if (num_of_time_tx > num_of_time_rx) num_of_time = num_of_time_rx; else num_of_time = num_of_time_tx; if (num_of_time > 0) { /* Blink both Tx and Rx LEDs */ if (LED_Blink(Adapter, 1 << GPIO_Num_tx, uiTxLedIndex, timeout, num_of_time, currdriverstate) == EVENT_SIGNALED) return EVENT_SIGNALED; if (LED_Blink(Adapter, 1 << GPIO_Num_rx, uiRxLedIndex, timeout, num_of_time, currdriverstate) == EVENT_SIGNALED) return EVENT_SIGNALED; } if (num_of_time == num_of_time_tx) { /* Blink pending rate of Rx */ if (LED_Blink(Adapter, (1 << GPIO_Num_rx), uiRxLedIndex, timeout, num_of_time_rx-num_of_time, currdriverstate) == EVENT_SIGNALED) return EVENT_SIGNALED; num_of_time = num_of_time_rx; } else { /* Blink pending rate of Tx */ if (LED_Blink(Adapter, 1 << GPIO_Num_tx, uiTxLedIndex, timeout, num_of_time_tx-num_of_time, currdriverstate) == EVENT_SIGNALED) return EVENT_SIGNALED; num_of_time = num_of_time_tx; } } else { if (num_of_time == num_of_time_tx) { /* Blink pending rate of Rx */ if (LED_Blink(Adapter, 1 << GPIO_Num_tx, uiTxLedIndex, timeout, num_of_time, currdriverstate) == EVENT_SIGNALED) return EVENT_SIGNALED; } else { /* Blink pending rate of Tx */ if (LED_Blink(Adapter, 1 << GPIO_Num_rx, uiRxLedIndex, timeout, num_of_time, currdriverstate) == EVENT_SIGNALED) return EVENT_SIGNALED; } } /* * If Tx/Rx rate is less than maximum blinks per second, * wait till delay completes to 1 second */ remDelay = MAX_NUM_OF_BLINKS - num_of_time; if (remDelay > 0) { timeout = 100 * remDelay; Status = wait_event_interruptible_timeout( Adapter->LEDInfo.notify_led_event, currdriverstate != Adapter->DriverState || kthread_should_stop(), msecs_to_jiffies(timeout)); if (kthread_should_stop()) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting"); Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED; return EVENT_SIGNALED; } if (Status) return EVENT_SIGNALED; } /* Turn off both Tx and Rx LEDs before next second */ TURN_OFF_LED(1 << GPIO_Num_tx, uiTxLedIndex); TURN_OFF_LED(1 << GPIO_Num_rx, uiTxLedIndex); /* * Read the Tx & Rx packets transmission after 1 second and * calculate rate of transfer */ Final_num_of_packts_tx = Adapter->dev->stats.tx_packets; Final_num_of_packts_rx = Adapter->dev->stats.rx_packets; rate_of_transfer_tx = Final_num_of_packts_tx - Initial_num_of_packts_tx; rate_of_transfer_rx = Final_num_of_packts_rx - Initial_num_of_packts_rx; /* Read initial value of packets sent/received */ Initial_num_of_packts_tx = Final_num_of_packts_tx; Initial_num_of_packts_rx = Final_num_of_packts_rx; /* Scale the rate of transfer to no of blinks. */ num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx); num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx); } return Status; } /* * ----------------------------------------------------------------------------- * Procedure: ValidateDSDParamsChecksum * * Description: Reads DSD Params and validates checkusm. * * Arguments: * Adapter - Pointer to Adapter structure. * ulParamOffset - Start offset of the DSD parameter to be read and * validated. * usParamLen - Length of the DSD Parameter. 
* * Returns: * <OSAL_STATUS_CODE> * ----------------------------------------------------------------------------- */ static INT ValidateDSDParamsChecksum(struct bcm_mini_adapter *Adapter, ULONG ulParamOffset, USHORT usParamLen) { INT Status = STATUS_SUCCESS; PUCHAR puBuffer = NULL; USHORT usChksmOrg = 0; USHORT usChecksumCalculated = 0; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X", ulParamOffset, usParamLen); puBuffer = kmalloc(usParamLen, GFP_KERNEL); if (!puBuffer) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: ValidateDSDParamsChecksum Allocation failed"); return -ENOMEM; } /* Read the DSD data from the parameter offset. */ if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)puBuffer, ulParamOffset, usParamLen)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed"); Status = STATUS_IMAGE_CHECKSUM_MISMATCH; goto exit; } /* Calculate the checksum of the data read from the DSD parameter. */ usChecksumCalculated = CFG_CalculateChecksum(puBuffer, usParamLen); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: usCheckSumCalculated = 0x%x\n", usChecksumCalculated); /* * End of the DSD parameter will have a TWO bytes checksum stored in it. * Read it and compare with the calculated Checksum. */ if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)&usChksmOrg, ulParamOffset+usParamLen, 2)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed"); Status = STATUS_IMAGE_CHECKSUM_MISMATCH; goto exit; } usChksmOrg = ntohs(usChksmOrg); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: usChksmOrg = 0x%x", usChksmOrg); /* * Compare the checksum calculated with the checksum read * from DSD section */ if (usChecksumCalculated ^ usChksmOrg) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: ValidateDSDParamsChecksum: Checksums don't match"); Status = STATUS_IMAGE_CHECKSUM_MISMATCH; goto exit; } exit: kfree(puBuffer); return Status; } /* * ----------------------------------------------------------------------------- * Procedure: ValidateHWParmStructure * * Description: Validates HW Parameters. * * Arguments: * Adapter - Pointer to Adapter structure. * ulHwParamOffset - Start offset of the HW parameter Section to be read * and validated. * * Returns: * <OSAL_STATUS_CODE> * ----------------------------------------------------------------------------- */ static INT ValidateHWParmStructure(struct bcm_mini_adapter *Adapter, ULONG ulHwParamOffset) { INT Status = STATUS_SUCCESS; USHORT HwParamLen = 0; /* * Add DSD start offset to the hwParamOffset to get * the actual address. 
*/ ulHwParamOffset += DSD_START_OFFSET; /* Read the Length of HW_PARAM structure */ BeceemNVMRead(Adapter, (PUINT)&HwParamLen, ulHwParamOffset, 2); HwParamLen = ntohs(HwParamLen); if (0 == HwParamLen || HwParamLen > Adapter->uiNVMDSDSize) return STATUS_IMAGE_CHECKSUM_MISMATCH; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread:HwParamLen = 0x%x", HwParamLen); Status = ValidateDSDParamsChecksum(Adapter, ulHwParamOffset, HwParamLen); return Status; } /* ValidateHWParmStructure() */ static int ReadLEDInformationFromEEPROM(struct bcm_mini_adapter *Adapter, UCHAR GPIO_Array[]) { int Status = STATUS_SUCCESS; ULONG dwReadValue = 0; USHORT usHwParamData = 0; USHORT usEEPROMVersion = 0; UCHAR ucIndex = 0; UCHAR ucGPIOInfo[32] = {0}; BeceemNVMRead(Adapter, (PUINT)&usEEPROMVersion, EEPROM_VERSION_OFFSET, 2); BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "usEEPROMVersion: Minor:0x%X Major:0x%x", usEEPROMVersion&0xFF, ((usEEPROMVersion>>8)&0xFF)); if (((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION) { BeceemNVMRead(Adapter, (PUINT)&usHwParamData, EEPROM_HW_PARAM_POINTER_ADDRESS, 2); usHwParamData = ntohs(usHwParamData); dwReadValue = usHwParamData; } else { /* * Validate Compatibility section and then read HW param * if compatibility section is valid. */ Status = ValidateDSDParamsChecksum(Adapter, DSD_START_OFFSET, COMPATIBILITY_SECTION_LENGTH_MAP5); if (Status != STATUS_SUCCESS) return Status; BeceemNVMRead(Adapter, (PUINT)&dwReadValue, EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5, 4); dwReadValue = ntohl(dwReadValue); } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: Start address of HW_PARAM structure = 0x%lx", dwReadValue); /* * Validate if the address read out is within the DSD. * Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit. * lower limit should be above DSD_START_OFFSET and * upper limit should be below (Adapter->uiNVMDSDSize-DSD_START_OFFSET) */ if (dwReadValue < DSD_START_OFFSET || dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET)) return STATUS_IMAGE_CHECKSUM_MISMATCH; Status = ValidateHWParmStructure(Adapter, dwReadValue); if (Status) return Status; /* * Add DSD_START_OFFSET to the offset read from the EEPROM. * This will give the actual start HW Parameters start address. * To read GPIO section, add GPIO offset further. */ dwReadValue += DSD_START_OFFSET; /* = start address of hw param section. */ dwReadValue += GPIO_SECTION_START_OFFSET; /* = GPIO start offset within HW Param section. 
*/ /* * Read the GPIO values for 32 GPIOs from EEPROM and map the function * number to GPIO pin number to GPIO_Array */ BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo, dwReadValue, 32); for (ucIndex = 0; ucIndex < 32; ucIndex++) { switch (ucGPIOInfo[ucIndex]) { case RED_LED: GPIO_Array[RED_LED] = ucIndex; Adapter->gpioBitMap |= (1 << ucIndex); break; case BLUE_LED: GPIO_Array[BLUE_LED] = ucIndex; Adapter->gpioBitMap |= (1 << ucIndex); break; case YELLOW_LED: GPIO_Array[YELLOW_LED] = ucIndex; Adapter->gpioBitMap |= (1 << ucIndex); break; case GREEN_LED: GPIO_Array[GREEN_LED] = ucIndex; Adapter->gpioBitMap |= (1 << ucIndex); break; default: break; } } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "GPIO's bit map correspond to LED :0x%X", Adapter->gpioBitMap); return Status; } static int ReadConfigFileStructure(struct bcm_mini_adapter *Adapter, BOOLEAN *bEnableThread) { int Status = STATUS_SUCCESS; /* Array to store GPIO numbers from EEPROM */ UCHAR GPIO_Array[NUM_OF_LEDS+1]; UINT uiIndex = 0; UINT uiNum_of_LED_Type = 0; PUCHAR puCFGData = NULL; UCHAR bData = 0; memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1); if (!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams)) { BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Target Params not Avail.\n"); return -ENOENT; } /* Populate GPIO_Array with GPIO numbers for LED functions */ /* Read the GPIO numbers from EEPROM */ Status = ReadLEDInformationFromEEPROM(Adapter, GPIO_Array); if (Status == STATUS_IMAGE_CHECKSUM_MISMATCH) { *bEnableThread = FALSE; return STATUS_SUCCESS; } else if (Status) { *bEnableThread = FALSE; return Status; } /* * CONFIG file read successfully. Deallocate the memory of * uiFileNameBufferSize */ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: Config file read successfully\n"); puCFGData = (PUCHAR) &Adapter->pstargetparams->HostDrvrConfig1; /* * Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which * will have the information of LED type, LED on state for different * driver state and LED blink state. */ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { bData = *puCFGData; /* * Check Bit 8 for polarity. If it is set, * polarity is reverse polarity */ if (bData & 0x80) { Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 0; /* unset the bit 8 */ bData = bData & 0x7f; } Adapter->LEDInfo.LEDState[uiIndex].LED_Type = bData; if (bData <= NUM_OF_LEDS) Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num = GPIO_Array[bData]; else Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num = DISABLE_GPIO_NUM; puCFGData++; bData = *puCFGData; Adapter->LEDInfo.LEDState[uiIndex].LED_On_State = bData; puCFGData++; bData = *puCFGData; Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State = bData; puCFGData++; } /* * Check if all the LED settings are disabled. If it is disabled, * dont launch the LED control thread. */ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { if ((Adapter->LEDInfo.LEDState[uiIndex].LED_Type == DISABLE_GPIO_NUM) || (Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0x7f) || (Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0)) uiNum_of_LED_Type++; } if (uiNum_of_LED_Type >= NUM_OF_LEDS) *bEnableThread = FALSE; return Status; } /* * ----------------------------------------------------------------------------- * Procedure: LedGpioInit * * Description: Initializes LED GPIOs. Makes the LED GPIOs to OUTPUT mode * and make the initial state to be OFF. * * Arguments: * Adapter - Pointer to MINI_ADAPTER structure. 
* * Returns: VOID * * ----------------------------------------------------------------------------- */ static VOID LedGpioInit(struct bcm_mini_adapter *Adapter) { UINT uiResetValue = 0; UINT uiIndex = 0; /* Set all LED GPIO Mode to output mode */ if (rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, sizeof(uiResetValue)) < 0) BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: RDM Failed\n"); for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM) uiResetValue |= (1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num); TURN_OFF_LED(1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num, uiIndex); } if (wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, sizeof(uiResetValue)) < 0) BCM_DEBUG_PRINT (Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: WRM Failed\n"); Adapter->LEDInfo.bIdle_led_off = FALSE; } static INT BcmGetGPIOPinInfo(struct bcm_mini_adapter *Adapter, UCHAR *GPIO_num_tx, UCHAR *GPIO_num_rx, UCHAR *uiLedTxIndex, UCHAR *uiLedRxIndex, enum bcm_led_events currdriverstate) { UINT uiIndex = 0; *GPIO_num_tx = DISABLE_GPIO_NUM; *GPIO_num_rx = DISABLE_GPIO_NUM; for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { if ((currdriverstate == NORMAL_OPERATION) || (currdriverstate == IDLEMODE_EXIT) || (currdriverstate == FW_DOWNLOAD)) { if (Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State & currdriverstate) { if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM) { if (*GPIO_num_tx == DISABLE_GPIO_NUM) { *GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num; *uiLedTxIndex = uiIndex; } else { *GPIO_num_rx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num; *uiLedRxIndex = uiIndex; } } } } else { if (Adapter->LEDInfo.LEDState[uiIndex].LED_On_State & currdriverstate) { if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM) { *GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num; *uiLedTxIndex = uiIndex; } } } } return STATUS_SUCCESS; } static VOID LEDControlThread(struct bcm_mini_adapter *Adapter) { UINT uiIndex = 0; UCHAR GPIO_num = 0; UCHAR uiLedIndex = 0; UINT uiResetValue = 0; enum bcm_led_events currdriverstate = 0; ulong timeout = 0; INT Status = 0; UCHAR dummyGPIONum = 0; UCHAR dummyIndex = 0; /* currdriverstate = Adapter->DriverState; */ Adapter->LEDInfo.bIdleMode_tx_from_host = FALSE; /* * Wait till event is triggered * * wait_event(Adapter->LEDInfo.notify_led_event, * currdriverstate!= Adapter->DriverState); */ GPIO_num = DISABLE_GPIO_NUM; while (TRUE) { /* Wait till event is triggered */ if ((GPIO_num == DISABLE_GPIO_NUM) || ((currdriverstate != FW_DOWNLOAD) && (currdriverstate != NORMAL_OPERATION) && (currdriverstate != LOWPOWER_MODE_ENTER)) || (currdriverstate == LED_THREAD_INACTIVE)) Status = wait_event_interruptible( Adapter->LEDInfo.notify_led_event, currdriverstate != Adapter->DriverState || kthread_should_stop()); if (kthread_should_stop() || Adapter->device_removed) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting"); Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED; TURN_OFF_LED(1 << GPIO_num, uiLedIndex); return; /* STATUS_FAILURE; */ } if (GPIO_num != DISABLE_GPIO_NUM) TURN_OFF_LED(1 << GPIO_num, uiLedIndex); if (Adapter->LEDInfo.bLedInitDone == FALSE) { LedGpioInit(Adapter); Adapter->LEDInfo.bLedInitDone = TRUE; } switch (Adapter->DriverState) { case DRIVER_INIT: currdriverstate = DRIVER_INIT; /* Adapter->DriverState; */ BcmGetGPIOPinInfo(Adapter, 
&GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate); if (GPIO_num != DISABLE_GPIO_NUM) TURN_ON_LED(1 << GPIO_num, uiLedIndex); break; case FW_DOWNLOAD: /* * BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, * LED_DUMP_INFO, DBG_LVL_ALL, * "LED Thread: FW_DN_DONE called\n"); */ currdriverstate = FW_DOWNLOAD; BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate); if (GPIO_num != DISABLE_GPIO_NUM) { timeout = 50; LED_Blink(Adapter, 1 << GPIO_num, uiLedIndex, timeout, -1, currdriverstate); } break; case FW_DOWNLOAD_DONE: currdriverstate = FW_DOWNLOAD_DONE; BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate); if (GPIO_num != DISABLE_GPIO_NUM) TURN_ON_LED(1 << GPIO_num, uiLedIndex); break; case SHUTDOWN_EXIT: /* * no break, continue to NO_NETWORK_ENTRY * state as well. */ case NO_NETWORK_ENTRY: currdriverstate = NO_NETWORK_ENTRY; BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyGPIONum, currdriverstate); if (GPIO_num != DISABLE_GPIO_NUM) TURN_ON_LED(1 << GPIO_num, uiLedIndex); break; case NORMAL_OPERATION: { UCHAR GPIO_num_tx = DISABLE_GPIO_NUM; UCHAR GPIO_num_rx = DISABLE_GPIO_NUM; UCHAR uiLEDTx = 0; UCHAR uiLEDRx = 0; currdriverstate = NORMAL_OPERATION; Adapter->LEDInfo.bIdle_led_off = FALSE; BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx, &GPIO_num_rx, &uiLEDTx, &uiLEDRx, currdriverstate); if ((GPIO_num_tx == DISABLE_GPIO_NUM) && (GPIO_num_rx == DISABLE_GPIO_NUM)) { GPIO_num = DISABLE_GPIO_NUM; } else { /* * If single LED is selected, use same * for both Tx and Rx */ if (GPIO_num_tx == DISABLE_GPIO_NUM) { GPIO_num_tx = GPIO_num_rx; uiLEDTx = uiLEDRx; } else if (GPIO_num_rx == DISABLE_GPIO_NUM) { GPIO_num_rx = GPIO_num_tx; uiLEDRx = uiLEDTx; } /* * Blink the LED in proportionate * to Tx and Rx transmissions. 
*/ LED_Proportional_Blink(Adapter, GPIO_num_tx, uiLEDTx, GPIO_num_rx, uiLEDRx, currdriverstate); } } break; case LOWPOWER_MODE_ENTER: currdriverstate = LOWPOWER_MODE_ENTER; if (DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING == Adapter->ulPowerSaveMode) { /* Turn OFF all the LED */ uiResetValue = 0; for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM) TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex); } } /* Turn off LED And WAKE-UP for Sendinf IDLE mode ACK */ Adapter->LEDInfo.bLedInitDone = FALSE; Adapter->LEDInfo.bIdle_led_off = TRUE; wake_up(&Adapter->LEDInfo.idleModeSyncEvent); GPIO_num = DISABLE_GPIO_NUM; break; case IDLEMODE_CONTINUE: currdriverstate = IDLEMODE_CONTINUE; GPIO_num = DISABLE_GPIO_NUM; break; case IDLEMODE_EXIT: break; case DRIVER_HALT: currdriverstate = DRIVER_HALT; GPIO_num = DISABLE_GPIO_NUM; for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM) TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex); } /* Adapter->DriverState = DRIVER_INIT; */ break; case LED_THREAD_INACTIVE: BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "InActivating LED thread..."); currdriverstate = LED_THREAD_INACTIVE; Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_INACTIVELY; Adapter->LEDInfo.bLedInitDone = FALSE; /* disable ALL LED */ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) { if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM) TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex); } break; case LED_THREAD_ACTIVE: BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Activating LED thread again..."); if (Adapter->LinkUpStatus == FALSE) Adapter->DriverState = NO_NETWORK_ENTRY; else Adapter->DriverState = NORMAL_OPERATION; Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_ACTIVELY; break; /* return; */ default: break; } } Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED; } int InitLedSettings(struct bcm_mini_adapter *Adapter) { int Status = STATUS_SUCCESS; BOOLEAN bEnableThread = TRUE; UCHAR uiIndex = 0; /* * Initially set BitPolarity to normal polarity. The bit 8 of LED type * is used to change the polarity of the LED. 
*/ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 1; /* * Read the LED settings of CONFIG file and map it * to GPIO numbers in EEPROM */ Status = ReadConfigFileStructure(Adapter, &bEnableThread); if (STATUS_SUCCESS != Status) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread: FAILED in ReadConfigFileStructure\n"); return Status; } if (Adapter->LEDInfo.led_thread_running) { if (bEnableThread) { ; } else { Adapter->DriverState = DRIVER_HALT; wake_up(&Adapter->LEDInfo.notify_led_event); Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED; } } else if (bEnableThread) { /* Create secondary thread to handle the LEDs */ init_waitqueue_head(&Adapter->LEDInfo.notify_led_event); init_waitqueue_head(&Adapter->LEDInfo.idleModeSyncEvent); Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_ACTIVELY; Adapter->LEDInfo.bIdle_led_off = FALSE; Adapter->LEDInfo.led_cntrl_threadid = kthread_run((int (*)(void *)) LEDControlThread, Adapter, "led_control_thread"); if (IS_ERR(Adapter->LEDInfo.led_cntrl_threadid)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Not able to spawn Kernel Thread\n"); Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED; return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid); } } return Status; }
gpl-2.0
N30nHaCkZ/android-platform_frameworks_base
arch/arm/mach-dove/dove-db-setup.c
4090
2732
/*
 * arch/arm/mach-dove/dove-db-setup.c
 *
 * Marvell DB-MV88AP510-BP Development Board Setup
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h>
#include <linux/timer.h>
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
#include <linux/i2c.h>
#include <linux/pci.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/dove.h>
#include "common.h"

static struct mv643xx_eth_platform_data dove_db_ge00_data = {
	.phy_addr	= MV643XX_ETH_PHY_ADDR_DEFAULT,
};

static struct mv_sata_platform_data dove_db_sata_data = {
	.n_ports	= 1,
};

/*****************************************************************************
 * SPI Devices:
 *	SPI0: 4M Flash ST-M25P32-VMF6P
 ****************************************************************************/
static const struct flash_platform_data dove_db_spi_flash_data = {
	.type		= "m25p64",
};

static struct spi_board_info __initdata dove_db_spi_flash_info[] = {
	{
		.modalias	= "m25p80",
		.platform_data	= &dove_db_spi_flash_data,
		.irq		= -1,
		.max_speed_hz	= 20000000,
		.bus_num	= 0,
		.chip_select	= 0,
	},
};

/*****************************************************************************
 * PCI
 ****************************************************************************/
static int __init dove_db_pci_init(void)
{
	if (machine_is_dove_db())
		dove_pcie_init(1, 1);

	return 0;
}

subsys_initcall(dove_db_pci_init);

/*****************************************************************************
 * Board Init
 ****************************************************************************/
static void __init dove_db_init(void)
{
	/*
	 * Basic Dove setup. Needs to be called early.
	 */
	dove_init();

	dove_ge00_init(&dove_db_ge00_data);
	dove_ehci0_init();
	dove_ehci1_init();
	dove_sata_init(&dove_db_sata_data);
	dove_sdio0_init();
	dove_sdio1_init();
	dove_spi0_init();
	dove_spi1_init();
	dove_uart0_init();
	dove_uart1_init();
	dove_i2c_init();
	spi_register_board_info(dove_db_spi_flash_info,
				ARRAY_SIZE(dove_db_spi_flash_info));
}

MACHINE_START(DOVE_DB, "Marvell DB-MV88AP510-BP Development Board")
	.atag_offset	= 0x100,
	.init_machine	= dove_db_init,
	.map_io		= dove_map_io,
	.init_early	= dove_init_early,
	.init_irq	= dove_init_irq,
	.init_time	= dove_timer_init,
	.restart	= dove_restart,
MACHINE_END
gpl-2.0
MojieBuddhist/linux-1
arch/arm/mach-ks8695/board-sg.c
4090
2668
/* * board-sg.c -- support for the SnapGear KS8695 based boards * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/physmap.h> #include <linux/mtd/partitions.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/devices.h> #include "generic.h" /* * The SG310 machine type is fitted with a conventional 8MB Strataflash * device. Define its partitioning. */ #define FL_BASE 0x02000000 #define FL_SIZE SZ_8M static struct mtd_partition sg_mtd_partitions[] = { [0] = { .name = "SnapGear Boot Loader", .size = SZ_128K, }, [1] = { .name = "SnapGear non-volatile configuration", .size = SZ_512K, .offset = SZ_256K, }, [2] = { .name = "SnapGear image", .offset = SZ_512K + SZ_256K, }, [3] = { .name = "SnapGear StrataFlash", }, [4] = { .name = "SnapGear Boot Tags", .size = SZ_128K, .offset = SZ_128K, }, }; static struct physmap_flash_data sg_mtd_pdata = { .width = 1, .nr_parts = ARRAY_SIZE(sg_mtd_partitions), .parts = sg_mtd_partitions, }; static struct resource sg_mtd_resource[] = { [0] = { .start = FL_BASE, .end = FL_BASE + FL_SIZE - 1, .flags = IORESOURCE_MEM, }, }; static struct platform_device sg_mtd_device = { .name = "physmap-flash", .id = 0, .num_resources = ARRAY_SIZE(sg_mtd_resource), .resource = sg_mtd_resource, .dev = { .platform_data = &sg_mtd_pdata, }, }; static void __init sg_init(void) { ks8695_add_device_lan(); ks8695_add_device_wan(); if (machine_is_sg310()) platform_device_register(&sg_mtd_device); } #ifdef CONFIG_MACH_LITE300 MACHINE_START(LITE300, "SecureComputing/SG300") /* SnapGear */ .atag_offset = 0x100, .map_io = ks8695_map_io, .init_irq = ks8695_init_irq, .init_machine = sg_init, .init_time = ks8695_timer_init, .restart = ks8695_restart, MACHINE_END #endif #ifdef CONFIG_MACH_SG310 MACHINE_START(SG310, "McAfee/SG310") /* SnapGear */ .atag_offset = 0x100, .map_io = ks8695_map_io, .init_irq = ks8695_init_irq, .init_machine = sg_init, .init_time = ks8695_timer_init, .restart = ks8695_restart, MACHINE_END #endif #ifdef CONFIG_MACH_SE4200 MACHINE_START(SE4200, "SecureComputing/SE4200") /* SnapGear */ .atag_offset = 0x100, .map_io = ks8695_map_io, .init_irq = ks8695_init_irq, .init_machine = sg_init, .init_time = ks8695_timer_init, .restart = ks8695_restart, MACHINE_END #endif
gpl-2.0
CyanogenMod/android_kernel_lge_bullhead
arch/arm/mach-mv78xx0/rd78x00-masa-setup.c
4090
2249
/* * arch/arm/mach-mv78x00/rd78x00-masa-setup.c * * Marvell RD-78x00-mASA Development Board Setup * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <linux/mv643xx_eth.h> #include <linux/ethtool.h> #include <mach/mv78xx0.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include "common.h" static struct mv643xx_eth_platform_data rd78x00_masa_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; static struct mv643xx_eth_platform_data rd78x00_masa_ge01_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(9), }; static struct mv643xx_eth_platform_data rd78x00_masa_ge10_data = { }; static struct mv643xx_eth_platform_data rd78x00_masa_ge11_data = { }; static struct mv_sata_platform_data rd78x00_masa_sata_data = { .n_ports = 2, }; static void __init rd78x00_masa_init(void) { /* * Basic MV78x00 setup. Needs to be called early. */ mv78xx0_init(); /* * Partition on-chip peripherals between the two CPU cores. */ if (mv78xx0_core_index() == 0) { mv78xx0_ehci0_init(); mv78xx0_ehci1_init(); mv78xx0_ge00_init(&rd78x00_masa_ge00_data); mv78xx0_ge10_init(&rd78x00_masa_ge10_data); mv78xx0_sata_init(&rd78x00_masa_sata_data); mv78xx0_uart0_init(); mv78xx0_uart2_init(); } else { mv78xx0_ehci2_init(); mv78xx0_ge01_init(&rd78x00_masa_ge01_data); mv78xx0_ge11_init(&rd78x00_masa_ge11_data); mv78xx0_uart1_init(); mv78xx0_uart3_init(); } } static int __init rd78x00_pci_init(void) { /* * Assign all PCIe devices to CPU core #0. */ if (machine_is_rd78x00_masa() && mv78xx0_core_index() == 0) mv78xx0_pcie_init(1, 1); return 0; } subsys_initcall(rd78x00_pci_init); MACHINE_START(RD78X00_MASA, "Marvell RD-78x00-MASA Development Board") /* Maintainer: Lennert Buytenhek <buytenh@marvell.com> */ .atag_offset = 0x100, .init_machine = rd78x00_masa_init, .map_io = mv78xx0_map_io, .init_early = mv78xx0_init_early, .init_irq = mv78xx0_init_irq, .init_time = mv78xx0_timer_init, .restart = mv78xx0_restart, MACHINE_END
gpl-2.0
G2Mini-DevTeam/android_kernel_lge_msm8226
drivers/pcmcia/cardbus.c
4858
2830
/* * cardbus.c -- 16-bit PCMCIA core support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * The initial developer of the original code is David A. Hinds * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * (C) 1999 David A. Hinds */ /* * Cardbus handling has been re-written to be more of a PCI bridge thing, * and the PCI code basically does all the resource handling. * * Linus, Jan 2000 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <pcmcia/ss.h> static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { u8 irq_pin; /* * Since there is only one interrupt available to * CardBus devices, all devices downstream of this * device must be using this IRQ. */ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin); if (irq_pin) { dev->irq = irq; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); } /* * Some controllers transfer very slowly with 0 CLS. * Configure it. This may fail as CLS configuration * is mandatory only for MWI. */ pci_set_cacheline_size(dev); if (dev->subordinate) cardbus_config_irq_and_cls(dev->subordinate, irq); } } /** * cb_alloc() - add CardBus device * @s: the pcmcia_socket where the CardBus device is located * * cb_alloc() allocates the kernel data structures for a Cardbus device * and handles the lowest level PCI device setup issues. */ int __ref cb_alloc(struct pcmcia_socket *s) { struct pci_bus *bus = s->cb_dev->subordinate; struct pci_dev *dev; unsigned int max, pass; s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); pci_fixup_cardbus(bus); max = bus->secondary; for (pass = 0; pass < 2; pass++) list_for_each_entry(dev, &bus->devices, bus_list) if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) max = pci_scan_bridge(bus, dev, max, pass); /* * Size all resources below the CardBus controller. */ pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); cardbus_config_irq_and_cls(bus, s->pci_irq); /* socket specific tune function */ if (s->tune_bridge) s->tune_bridge(s, bus); pci_enable_bridges(bus); pci_bus_add_devices(bus); return 0; } /** * cb_free() - remove CardBus device * @s: the pcmcia_socket where the CardBus device was located * * cb_free() handles the lowest level PCI device cleanup. */ void cb_free(struct pcmcia_socket *s) { struct pci_dev *bridge = s->cb_dev; if (bridge) pci_stop_and_remove_behind_bridge(bridge); }
gpl-2.0
flar2/flo-ElementalX
sound/atmel/abdac.c
4858
15732
/* * Driver for the Atmel on-chip Audio Bitstream DAC (ABDAC) * * Copyright (C) 2006-2009 Atmel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/clk.h> #include <linux/bitmap.h> #include <linux/dw_dmac.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/io.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/atmel-abdac.h> /* DAC register offsets */ #define DAC_DATA 0x0000 #define DAC_CTRL 0x0008 #define DAC_INT_MASK 0x000c #define DAC_INT_EN 0x0010 #define DAC_INT_DIS 0x0014 #define DAC_INT_CLR 0x0018 #define DAC_INT_STATUS 0x001c /* Bitfields in CTRL */ #define DAC_SWAP_OFFSET 30 #define DAC_SWAP_SIZE 1 #define DAC_EN_OFFSET 31 #define DAC_EN_SIZE 1 /* Bitfields in INT_MASK/INT_EN/INT_DIS/INT_STATUS/INT_CLR */ #define DAC_UNDERRUN_OFFSET 28 #define DAC_UNDERRUN_SIZE 1 #define DAC_TX_READY_OFFSET 29 #define DAC_TX_READY_SIZE 1 /* Bit manipulation macros */ #define DAC_BIT(name) \ (1 << DAC_##name##_OFFSET) #define DAC_BF(name, value) \ (((value) & ((1 << DAC_##name##_SIZE) - 1)) \ << DAC_##name##_OFFSET) #define DAC_BFEXT(name, value) \ (((value) >> DAC_##name##_OFFSET) \ & ((1 << DAC_##name##_SIZE) - 1)) #define DAC_BFINS(name, value, old) \ (((old) & ~(((1 << DAC_##name##_SIZE) - 1) \ << DAC_##name##_OFFSET)) \ | DAC_BF(name, value)) /* Register access macros */ #define dac_readl(port, reg) \ __raw_readl((port)->regs + DAC_##reg) #define dac_writel(port, reg, value) \ __raw_writel((value), (port)->regs + DAC_##reg) /* * ABDAC supports a maximum of 6 different rates from a generic clock. The * generic clock has a power of two divider, which gives 6 steps from 192 kHz * to 5112 Hz. */ #define MAX_NUM_RATES 6 /* ALSA seems to use rates between 192000 Hz and 5112 Hz. */ #define RATE_MAX 192000 #define RATE_MIN 5112 enum { DMA_READY = 0, }; struct atmel_abdac_dma { struct dma_chan *chan; struct dw_cyclic_desc *cdesc; }; struct atmel_abdac { struct clk *pclk; struct clk *sample_clk; struct platform_device *pdev; struct atmel_abdac_dma dma; struct snd_pcm_hw_constraint_list constraints_rates; struct snd_pcm_substream *substream; struct snd_card *card; struct snd_pcm *pcm; void __iomem *regs; unsigned long flags; unsigned int rates[MAX_NUM_RATES]; unsigned int rates_num; int irq; }; #define get_dac(card) ((struct atmel_abdac *)(card)->private_data) /* This function is called by the DMA driver. */ static void atmel_abdac_dma_period_done(void *arg) { struct atmel_abdac *dac = arg; snd_pcm_period_elapsed(dac->substream); } static int atmel_abdac_prepare_dma(struct atmel_abdac *dac, struct snd_pcm_substream *substream, enum dma_data_direction direction) { struct dma_chan *chan = dac->dma.chan; struct dw_cyclic_desc *cdesc; struct snd_pcm_runtime *runtime = substream->runtime; unsigned long buffer_len, period_len; /* * We don't do DMA on "complex" transfers, i.e. with * non-halfword-aligned buffers or lengths. 
*/ if (runtime->dma_addr & 1 || runtime->buffer_size & 1) { dev_dbg(&dac->pdev->dev, "too complex transfer\n"); return -EINVAL; } buffer_len = frames_to_bytes(runtime, runtime->buffer_size); period_len = frames_to_bytes(runtime, runtime->period_size); cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len, period_len, DMA_MEM_TO_DEV); if (IS_ERR(cdesc)) { dev_dbg(&dac->pdev->dev, "could not prepare cyclic DMA\n"); return PTR_ERR(cdesc); } cdesc->period_callback = atmel_abdac_dma_period_done; cdesc->period_callback_param = dac; dac->dma.cdesc = cdesc; set_bit(DMA_READY, &dac->flags); return 0; } static struct snd_pcm_hardware atmel_abdac_hw = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_PAUSE), .formats = (SNDRV_PCM_FMTBIT_S16_BE), .rates = (SNDRV_PCM_RATE_KNOT), .rate_min = RATE_MIN, .rate_max = RATE_MAX, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 64 * 4096, .period_bytes_min = 4096, .period_bytes_max = 4096, .periods_min = 6, .periods_max = 64, }; static int atmel_abdac_open(struct snd_pcm_substream *substream) { struct atmel_abdac *dac = snd_pcm_substream_chip(substream); dac->substream = substream; atmel_abdac_hw.rate_max = dac->rates[dac->rates_num - 1]; atmel_abdac_hw.rate_min = dac->rates[0]; substream->runtime->hw = atmel_abdac_hw; return snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &dac->constraints_rates); } static int atmel_abdac_close(struct snd_pcm_substream *substream) { struct atmel_abdac *dac = snd_pcm_substream_chip(substream); dac->substream = NULL; return 0; } static int atmel_abdac_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct atmel_abdac *dac = snd_pcm_substream_chip(substream); int retval; retval = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); if (retval < 0) return retval; /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. 
*/ if (retval == 1) if (test_and_clear_bit(DMA_READY, &dac->flags)) dw_dma_cyclic_free(dac->dma.chan); return retval; } static int atmel_abdac_hw_free(struct snd_pcm_substream *substream) { struct atmel_abdac *dac = snd_pcm_substream_chip(substream); if (test_and_clear_bit(DMA_READY, &dac->flags)) dw_dma_cyclic_free(dac->dma.chan); return snd_pcm_lib_free_pages(substream); } static int atmel_abdac_prepare(struct snd_pcm_substream *substream) { struct atmel_abdac *dac = snd_pcm_substream_chip(substream); int retval; retval = clk_set_rate(dac->sample_clk, 256 * substream->runtime->rate); if (retval) return retval; if (!test_bit(DMA_READY, &dac->flags)) retval = atmel_abdac_prepare_dma(dac, substream, DMA_TO_DEVICE); return retval; } static int atmel_abdac_trigger(struct snd_pcm_substream *substream, int cmd) { struct atmel_abdac *dac = snd_pcm_substream_chip(substream); int retval = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */ case SNDRV_PCM_TRIGGER_RESUME: /* fall through */ case SNDRV_PCM_TRIGGER_START: clk_enable(dac->sample_clk); retval = dw_dma_cyclic_start(dac->dma.chan); if (retval) goto out; dac_writel(dac, CTRL, DAC_BIT(EN)); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */ case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */ case SNDRV_PCM_TRIGGER_STOP: dw_dma_cyclic_stop(dac->dma.chan); dac_writel(dac, DATA, 0); dac_writel(dac, CTRL, 0); clk_disable(dac->sample_clk); break; default: retval = -EINVAL; break; } out: return retval; } static snd_pcm_uframes_t atmel_abdac_pointer(struct snd_pcm_substream *substream) { struct atmel_abdac *dac = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; snd_pcm_uframes_t frames; unsigned long bytes; bytes = dw_dma_get_src_addr(dac->dma.chan); bytes -= runtime->dma_addr; frames = bytes_to_frames(runtime, bytes); if (frames >= runtime->buffer_size) frames -= runtime->buffer_size; return frames; } static irqreturn_t abdac_interrupt(int irq, void *dev_id) { struct atmel_abdac *dac = dev_id; u32 status; status = dac_readl(dac, INT_STATUS); if (status & DAC_BIT(UNDERRUN)) { dev_err(&dac->pdev->dev, "underrun detected\n"); dac_writel(dac, INT_CLR, DAC_BIT(UNDERRUN)); } else { dev_err(&dac->pdev->dev, "spurious interrupt (status=0x%x)\n", status); dac_writel(dac, INT_CLR, status); } return IRQ_HANDLED; } static struct snd_pcm_ops atmel_abdac_ops = { .open = atmel_abdac_open, .close = atmel_abdac_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = atmel_abdac_hw_params, .hw_free = atmel_abdac_hw_free, .prepare = atmel_abdac_prepare, .trigger = atmel_abdac_trigger, .pointer = atmel_abdac_pointer, }; static int __devinit atmel_abdac_pcm_new(struct atmel_abdac *dac) { struct snd_pcm_hardware hw = atmel_abdac_hw; struct snd_pcm *pcm; int retval; retval = snd_pcm_new(dac->card, dac->card->shortname, dac->pdev->id, 1, 0, &pcm); if (retval) return retval; strcpy(pcm->name, dac->card->shortname); pcm->private_data = dac; pcm->info_flags = 0; dac->pcm = pcm; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &atmel_abdac_ops); retval = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, &dac->pdev->dev, hw.periods_min * hw.period_bytes_min, hw.buffer_bytes_max); return retval; } static bool filter(struct dma_chan *chan, void *slave) { struct dw_dma_slave *dws = slave; if (dws->dma_dev == chan->device->dev) { chan->private = dws; return true; } else return false; } static int set_sample_rates(struct atmel_abdac *dac) { long new_rate = RATE_MAX; int retval = -EINVAL; int index = 0; /* 
we start at 192 kHz and work our way down to 5112 Hz */ while (new_rate >= RATE_MIN && index < (MAX_NUM_RATES + 1)) { new_rate = clk_round_rate(dac->sample_clk, 256 * new_rate); if (new_rate < 0) break; /* make sure we are below the ABDAC clock */ if (new_rate <= clk_get_rate(dac->pclk)) { dac->rates[index] = new_rate / 256; index++; } /* divide by 256 and then by two to get next rate */ new_rate /= 256 * 2; } if (index) { int i; /* reverse array, smallest go first */ for (i = 0; i < (index / 2); i++) { unsigned int tmp = dac->rates[index - 1 - i]; dac->rates[index - 1 - i] = dac->rates[i]; dac->rates[i] = tmp; } dac->constraints_rates.count = index; dac->constraints_rates.list = dac->rates; dac->constraints_rates.mask = 0; dac->rates_num = index; retval = 0; } return retval; } static int __devinit atmel_abdac_probe(struct platform_device *pdev) { struct snd_card *card; struct atmel_abdac *dac; struct resource *regs; struct atmel_abdac_pdata *pdata; struct clk *pclk; struct clk *sample_clk; int retval; int irq; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_dbg(&pdev->dev, "no memory resource\n"); return -ENXIO; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_dbg(&pdev->dev, "could not get IRQ number\n"); return irq; } pdata = pdev->dev.platform_data; if (!pdata) { dev_dbg(&pdev->dev, "no platform data\n"); return -ENXIO; } pclk = clk_get(&pdev->dev, "pclk"); if (IS_ERR(pclk)) { dev_dbg(&pdev->dev, "no peripheral clock\n"); return PTR_ERR(pclk); } sample_clk = clk_get(&pdev->dev, "sample_clk"); if (IS_ERR(sample_clk)) { dev_dbg(&pdev->dev, "no sample clock\n"); retval = PTR_ERR(sample_clk); goto out_put_pclk; } clk_enable(pclk); retval = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, sizeof(struct atmel_abdac), &card); if (retval) { dev_dbg(&pdev->dev, "could not create sound card device\n"); goto out_put_sample_clk; } dac = get_dac(card); dac->irq = irq; dac->card = card; dac->pclk = pclk; dac->sample_clk = sample_clk; dac->pdev = pdev; retval = set_sample_rates(dac); if (retval < 0) { dev_dbg(&pdev->dev, "could not set supported rates\n"); goto out_free_card; } dac->regs = ioremap(regs->start, resource_size(regs)); if (!dac->regs) { dev_dbg(&pdev->dev, "could not remap register memory\n"); goto out_free_card; } /* make sure the DAC is silent and disabled */ dac_writel(dac, DATA, 0); dac_writel(dac, CTRL, 0); retval = request_irq(irq, abdac_interrupt, 0, "abdac", dac); if (retval) { dev_dbg(&pdev->dev, "could not request irq\n"); goto out_unmap_regs; } snd_card_set_dev(card, &pdev->dev); if (pdata->dws.dma_dev) { dma_cap_mask_t mask; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); dac->dma.chan = dma_request_channel(mask, filter, &pdata->dws); if (dac->dma.chan) { struct dma_slave_config dma_conf = { .dst_addr = regs->start + DAC_DATA, .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, .src_maxburst = 1, .dst_maxburst = 1, .direction = DMA_MEM_TO_DEV, .device_fc = false, }; dmaengine_slave_config(dac->dma.chan, &dma_conf); } } if (!pdata->dws.dma_dev || !dac->dma.chan) { dev_dbg(&pdev->dev, "DMA not available\n"); retval = -ENODEV; goto out_unset_card_dev; } strcpy(card->driver, "Atmel ABDAC"); strcpy(card->shortname, "Atmel ABDAC"); sprintf(card->longname, "Atmel Audio Bitstream DAC"); retval = atmel_abdac_pcm_new(dac); if (retval) { dev_dbg(&pdev->dev, "could not register ABDAC pcm device\n"); goto out_release_dma; } retval = snd_card_register(card); if (retval) { dev_dbg(&pdev->dev, "could not register sound card\n"); goto 
out_release_dma; } platform_set_drvdata(pdev, card); dev_info(&pdev->dev, "Atmel ABDAC at 0x%p using %s\n", dac->regs, dev_name(&dac->dma.chan->dev->device)); return retval; out_release_dma: dma_release_channel(dac->dma.chan); dac->dma.chan = NULL; out_unset_card_dev: snd_card_set_dev(card, NULL); free_irq(irq, dac); out_unmap_regs: iounmap(dac->regs); out_free_card: snd_card_free(card); out_put_sample_clk: clk_put(sample_clk); clk_disable(pclk); out_put_pclk: clk_put(pclk); return retval; } #ifdef CONFIG_PM static int atmel_abdac_suspend(struct platform_device *pdev, pm_message_t msg) { struct snd_card *card = platform_get_drvdata(pdev); struct atmel_abdac *dac = card->private_data; dw_dma_cyclic_stop(dac->dma.chan); clk_disable(dac->sample_clk); clk_disable(dac->pclk); return 0; } static int atmel_abdac_resume(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); struct atmel_abdac *dac = card->private_data; clk_enable(dac->pclk); clk_enable(dac->sample_clk); if (test_bit(DMA_READY, &dac->flags)) dw_dma_cyclic_start(dac->dma.chan); return 0; } #else #define atmel_abdac_suspend NULL #define atmel_abdac_resume NULL #endif static int __devexit atmel_abdac_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); struct atmel_abdac *dac = get_dac(card); clk_put(dac->sample_clk); clk_disable(dac->pclk); clk_put(dac->pclk); dma_release_channel(dac->dma.chan); dac->dma.chan = NULL; snd_card_set_dev(card, NULL); iounmap(dac->regs); free_irq(dac->irq, dac); snd_card_free(card); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver atmel_abdac_driver = { .remove = __devexit_p(atmel_abdac_remove), .driver = { .name = "atmel_abdac", }, .suspend = atmel_abdac_suspend, .resume = atmel_abdac_resume, }; static int __init atmel_abdac_init(void) { return platform_driver_probe(&atmel_abdac_driver, atmel_abdac_probe); } module_init(atmel_abdac_init); static void __exit atmel_abdac_exit(void) { platform_driver_unregister(&atmel_abdac_driver); } module_exit(atmel_abdac_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver for Atmel Audio Bitstream DAC (ABDAC)"); MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
gpl-2.0
faust93/franco_geehrc_f93
drivers/ide/ide-cd.c
5370
46958
/* * ATAPI CD-ROM driver. * * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov> * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org> * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de> * Copyright (C) 2005, 2007-2009 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. * * See Documentation/cdrom/ide-cd for usage information. * * Suggestions are welcome. Patches that work are more welcome though. ;-) * * Documentation: * Mt. Fuji (SFF8090 version 4) and ATAPI (SFF-8020i rev 2.6) standards. * * For historical changelog please see: * Documentation/ide/ChangeLog.ide-cd.1994-2004 */ #define DRV_NAME "ide-cd" #define PFX DRV_NAME ": " #define IDECD_VERSION "5.00" #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/cdrom.h> #include <linux/ide.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/bcd.h> /* For SCSI -> ATAPI command conversion */ #include <scsi/scsi.h> #include <linux/io.h> #include <asm/byteorder.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include "ide-cd.h" static DEFINE_MUTEX(ide_cd_mutex); static DEFINE_MUTEX(idecd_ref_mutex); static void ide_cd_release(struct device *); static struct cdrom_info *ide_cd_get(struct gendisk *disk) { struct cdrom_info *cd = NULL; mutex_lock(&idecd_ref_mutex); cd = ide_drv_g(disk, cdrom_info); if (cd) { if (ide_device_get(cd->drive)) cd = NULL; else get_device(&cd->dev); } mutex_unlock(&idecd_ref_mutex); return cd; } static void ide_cd_put(struct cdrom_info *cd) { ide_drive_t *drive = cd->drive; mutex_lock(&idecd_ref_mutex); put_device(&cd->dev); ide_device_put(drive); mutex_unlock(&idecd_ref_mutex); } /* * Generic packet command support and error handling routines. */ /* Mark that we've seen a media change and invalidate our internal buffers. */ static void cdrom_saw_media_change(ide_drive_t *drive) { drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED; drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID; } static int cdrom_log_sense(ide_drive_t *drive, struct request *rq) { struct request_sense *sense = &drive->sense_data; int log = 0; if (!sense || !rq || (rq->cmd_flags & REQ_QUIET)) return 0; ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key); switch (sense->sense_key) { case NO_SENSE: case RECOVERED_ERROR: break; case NOT_READY: /* * don't care about tray state messages for e.g. capacity * commands or in-progress or becoming ready */ if (sense->asc == 0x3a || sense->asc == 0x04) break; log = 1; break; case ILLEGAL_REQUEST: /* * don't log START_STOP unit with LoEj set, since we cannot * reliably check if drive can auto-close */ if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) break; log = 1; break; case UNIT_ATTENTION: /* * Make good and sure we've seen this potential media change. * Some drives (i.e. Creative) fail to present the correct sense * key in the error register. 
*/ cdrom_saw_media_change(drive); break; default: log = 1; break; } return log; } static void cdrom_analyze_sense_data(ide_drive_t *drive, struct request *failed_command) { struct request_sense *sense = &drive->sense_data; struct cdrom_info *info = drive->driver_data; unsigned long sector; unsigned long bio_sectors; ide_debug_log(IDE_DBG_SENSE, "error_code: 0x%x, sense_key: 0x%x", sense->error_code, sense->sense_key); if (failed_command) ide_debug_log(IDE_DBG_SENSE, "failed cmd: 0x%x", failed_command->cmd[0]); if (!cdrom_log_sense(drive, failed_command)) return; /* * If a read toc is executed for a CD-R or CD-RW medium where the first * toc has not been recorded yet, it will fail with 05/24/00 (which is a * confusing error) */ if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP) if (sense->sense_key == 0x05 && sense->asc == 0x24) return; /* current error */ if (sense->error_code == 0x70) { switch (sense->sense_key) { case MEDIUM_ERROR: case VOLUME_OVERFLOW: case ILLEGAL_REQUEST: if (!sense->valid) break; if (failed_command == NULL || failed_command->cmd_type != REQ_TYPE_FS) break; sector = (sense->information[0] << 24) | (sense->information[1] << 16) | (sense->information[2] << 8) | (sense->information[3]); if (queue_logical_block_size(drive->queue) == 2048) /* device sector size is 2K */ sector <<= 2; bio_sectors = max(bio_sectors(failed_command->bio), 4U); sector &= ~(bio_sectors - 1); /* * The SCSI specification allows for the value * returned by READ CAPACITY to be up to 75 2K * sectors past the last readable block. * Therefore, if we hit a medium error within the * last 75 2K sectors, we decrease the saved size * value. */ if (sector < get_capacity(info->disk) && drive->probed_capacity - sector < 4 * 75) set_capacity(info->disk, sector); } } ide_cd_log_error(drive->name, failed_command, sense); } static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) { /* * For REQ_TYPE_SENSE, "rq->special" points to the original * failed request. Also, the sense data should be read * directly from rq which might be different from the original * sense buffer if it got copied during mapping. */ struct request *failed = (struct request *)rq->special; void *sense = bio_data(rq->bio); if (failed) { if (failed->sense) { /* * Sense is always read into drive->sense_data. * Copy back if the failed request has its * sense pointer set. */ memcpy(failed->sense, sense, 18); failed->sense_len = rq->sense_len; } cdrom_analyze_sense_data(drive, failed); if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed))) BUG(); } else cdrom_analyze_sense_data(drive, NULL); } /* * Allow the drive 5 seconds to recover; some devices will return NOT_READY * while flushing data from cache. * * returns: 0 failed (write timeout expired) * 1 success */ static int ide_cd_breathe(ide_drive_t *drive, struct request *rq) { struct cdrom_info *info = drive->driver_data; if (!rq->errors) info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY; rq->errors = 1; if (time_after(jiffies, info->write_timeout)) return 0; else { /* * take a breather */ blk_delay_queue(drive->queue, 1); return 1; } } /** * Returns: * 0: if the request should be continued. * 1: if the request will be going through error recovery. * 2: if the request should be ended. 
*/ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) { ide_hwif_t *hwif = drive->hwif; struct request *rq = hwif->rq; int err, sense_key, do_end_request = 0; /* get the IDE error register */ err = ide_read_error(drive); sense_key = err >> 4; ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, rq->cmd_type: 0x%x, err: 0x%x, " "stat 0x%x", rq->cmd[0], rq->cmd_type, err, stat); if (rq->cmd_type == REQ_TYPE_SENSE) { /* * We got an error trying to get sense info from the drive * (probably while trying to recover from a former error). * Just give up. */ rq->cmd_flags |= REQ_FAILED; return 2; } /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors) rq->errors = SAM_STAT_CHECK_CONDITION; if (blk_noretry_request(rq)) do_end_request = 1; switch (sense_key) { case NOT_READY: if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) { if (ide_cd_breathe(drive, rq)) return 1; } else { cdrom_saw_media_change(drive); if (rq->cmd_type == REQ_TYPE_FS && !(rq->cmd_flags & REQ_QUIET)) printk(KERN_ERR PFX "%s: tray open\n", drive->name); } do_end_request = 1; break; case UNIT_ATTENTION: cdrom_saw_media_change(drive); if (rq->cmd_type != REQ_TYPE_FS) return 0; /* * Arrange to retry the request but be sure to give up if we've * retried too many times. */ if (++rq->errors > ERROR_MAX) do_end_request = 1; break; case ILLEGAL_REQUEST: /* * Don't print error message for this condition -- SFF8090i * indicates that 5/24/00 is the correct response to a request * to close the tray if the drive doesn't have that capability. * * cdrom_log_sense() knows this! */ if (rq->cmd[0] == GPCMD_START_STOP_UNIT) break; /* fall-through */ case DATA_PROTECT: /* * No point in retrying after an illegal request or data * protect error. */ if (!(rq->cmd_flags & REQ_QUIET)) ide_dump_status(drive, "command error", stat); do_end_request = 1; break; case MEDIUM_ERROR: /* * No point in re-trying a zillion times on a bad sector. * If we got here the error is not correctable. */ if (!(rq->cmd_flags & REQ_QUIET)) ide_dump_status(drive, "media error " "(bad sector)", stat); do_end_request = 1; break; case BLANK_CHECK: /* disk appears blank? */ if (!(rq->cmd_flags & REQ_QUIET)) ide_dump_status(drive, "media error (blank)", stat); do_end_request = 1; break; default: if (rq->cmd_type != REQ_TYPE_FS) break; if (err & ~ATA_ABORTED) { /* go to the default handler for other errors */ ide_error(drive, "cdrom_decode_status", stat); return 1; } else if (++rq->errors > ERROR_MAX) /* we've racked up too many retries, abort */ do_end_request = 1; } if (rq->cmd_type != REQ_TYPE_FS) { rq->cmd_flags |= REQ_FAILED; do_end_request = 1; } /* * End a request through request sense analysis when we have sense data. * We need this in order to perform end of media processing. */ if (do_end_request) goto end_request; /* if we got a CHECK_CONDITION status, queue a request sense command */ if (stat & ATA_ERR) return ide_queue_sense_rq(drive, NULL) ? 2 : 1; return 1; end_request: if (stat & ATA_ERR) { hwif->rq = NULL; return ide_queue_sense_rq(drive, rq) ? 2 : 1; } else return 2; } static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd) { struct request *rq = cmd->rq; ide_debug_log(IDE_DBG_FUNC, "rq->cmd[0]: 0x%x", rq->cmd[0]); /* * Some of the trailing request sense fields are optional, * and some drives don't send them. Sigh. 
*/ if (rq->cmd[0] == GPCMD_REQUEST_SENSE && cmd->nleft > 0 && cmd->nleft <= 5) cmd->nleft = 0; } int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, int write, void *buffer, unsigned *bufflen, struct request_sense *sense, int timeout, unsigned int cmd_flags) { struct cdrom_info *info = drive->driver_data; struct request_sense local_sense; int retries = 10; unsigned int flags = 0; if (!sense) sense = &local_sense; ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, " "cmd_flags: 0x%x", cmd[0], write, timeout, cmd_flags); /* start of retry loop */ do { struct request *rq; int error; rq = blk_get_request(drive->queue, write, __GFP_WAIT); memcpy(rq->cmd, cmd, BLK_MAX_CDB); rq->cmd_type = REQ_TYPE_ATA_PC; rq->sense = sense; rq->cmd_flags |= cmd_flags; rq->timeout = timeout; if (buffer) { error = blk_rq_map_kern(drive->queue, rq, buffer, *bufflen, GFP_NOIO); if (error) { blk_put_request(rq); return error; } } error = blk_execute_rq(drive->queue, info->disk, rq, 0); if (buffer) *bufflen = rq->resid_len; flags = rq->cmd_flags; blk_put_request(rq); /* * FIXME: we should probably abort/retry or something in case of * failure. */ if (flags & REQ_FAILED) { /* * The request failed. Retry if it was due to a unit * attention status (usually means media was changed). */ struct request_sense *reqbuf = sense; if (reqbuf->sense_key == UNIT_ATTENTION) cdrom_saw_media_change(drive); else if (reqbuf->sense_key == NOT_READY && reqbuf->asc == 4 && reqbuf->ascq != 4) { /* * The drive is in the process of loading * a disk. Retry, but wait a little to give * the drive time to complete the load. */ ssleep(2); } else { /* otherwise, don't retry */ retries = 0; } --retries; } /* end of retry loop */ } while ((flags & REQ_FAILED) && retries >= 0); /* return an error if the command failed */ return (flags & REQ_FAILED) ? -EIO : 0; } /* * returns true if rq has been completed */ static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd) { unsigned int nr_bytes = cmd->nbytes - cmd->nleft; if (cmd->tf_flags & IDE_TFLAG_WRITE) nr_bytes -= cmd->last_xfer_len; if (nr_bytes > 0) { ide_complete_rq(drive, 0, nr_bytes); return true; } return false; } static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct ide_cmd *cmd = &hwif->cmd; struct request *rq = hwif->rq; ide_expiry_t *expiry = NULL; int dma_error = 0, dma, thislen, uptodate = 0; int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; int sense = (rq->cmd_type == REQ_TYPE_SENSE); unsigned int timeout; u16 len; u8 ireason, stat; ide_debug_log(IDE_DBG_PC, "cmd: 0x%x, write: 0x%x", rq->cmd[0], write); /* check for errors */ dma = drive->dma; if (dma) { drive->dma = 0; drive->waiting_for_dma = 0; dma_error = hwif->dma_ops->dma_end(drive); ide_dma_unmap_sg(drive, cmd); if (dma_error) { printk(KERN_ERR PFX "%s: DMA %s error\n", drive->name, write ? "write" : "read"); ide_dma_off(drive); } } /* check status */ stat = hwif->tp_ops->read_status(hwif); if (!OK_STAT(stat, 0, BAD_R_STAT)) { rc = cdrom_decode_status(drive, stat); if (rc) { if (rc == 2) goto out_end; return ide_stopped; } } /* using dma, transfer is complete now */ if (dma) { if (dma_error) return ide_error(drive, "dma error", stat); uptodate = 1; goto out_end; } ide_read_bcount_and_ireason(drive, &len, &ireason); thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft; if (thislen > len) thislen = len; ide_debug_log(IDE_DBG_PC, "DRQ: stat: 0x%x, thislen: %d", stat, thislen); /* If DRQ is clear, the command has completed. 
*/ if ((stat & ATA_DRQ) == 0) { if (rq->cmd_type == REQ_TYPE_FS) { /* * If we're not done reading/writing, complain. * Otherwise, complete the command normally. */ uptodate = 1; if (cmd->nleft > 0) { printk(KERN_ERR PFX "%s: %s: data underrun " "(%u bytes)\n", drive->name, __func__, cmd->nleft); if (!write) rq->cmd_flags |= REQ_FAILED; uptodate = 0; } } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { ide_cd_request_sense_fixup(drive, cmd); uptodate = cmd->nleft ? 0 : 1; /* * suck out the remaining bytes from the drive in an * attempt to complete the data xfer. (see BZ#13399) */ if (!(stat & ATA_ERR) && !uptodate && thislen) { ide_pio_bytes(drive, cmd, write, thislen); uptodate = cmd->nleft ? 0 : 1; } if (!uptodate) rq->cmd_flags |= REQ_FAILED; } goto out_end; } rc = ide_check_ireason(drive, rq, len, ireason, write); if (rc) goto out_end; cmd->last_xfer_len = 0; ide_debug_log(IDE_DBG_PC, "data transfer, rq->cmd_type: 0x%x, " "ireason: 0x%x", rq->cmd_type, ireason); /* transfer data */ while (thislen > 0) { int blen = min_t(int, thislen, cmd->nleft); if (cmd->nleft == 0) break; ide_pio_bytes(drive, cmd, write, blen); cmd->last_xfer_len += blen; thislen -= blen; len -= blen; if (sense && write == 0) rq->sense_len += blen; } /* pad, if necessary */ if (len > 0) { if (rq->cmd_type != REQ_TYPE_FS || write == 0) ide_pad_transfer(drive, write, len); else { printk(KERN_ERR PFX "%s: confused, missing data\n", drive->name); blk_dump_rq_flags(rq, "cdrom_newpc_intr"); } } if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { timeout = rq->timeout; } else { timeout = ATAPI_WAIT_PC; if (rq->cmd_type != REQ_TYPE_FS) expiry = ide_cd_expiry; } hwif->expiry = expiry; ide_set_handler(drive, cdrom_newpc_intr, timeout); return ide_started; out_end: if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) { rq->resid_len = 0; blk_end_request_all(rq, 0); hwif->rq = NULL; } else { if (sense && uptodate) ide_cd_complete_failed_rq(drive, rq); if (rq->cmd_type == REQ_TYPE_FS) { if (cmd->nleft == 0) uptodate = 1; } else { if (uptodate <= 0 && rq->errors == 0) rq->errors = -EIO; } if (uptodate == 0 && rq->bio) if (ide_cd_error_cmd(drive, cmd)) return ide_stopped; /* make sure it's fully ended */ if (rq->cmd_type != REQ_TYPE_FS) { rq->resid_len -= cmd->nbytes - cmd->nleft; if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) rq->resid_len += cmd->last_xfer_len; } ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq)); if (sense && rc == 2) ide_error(drive, "request sense failure", stat); } return ide_stopped; } static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) { struct cdrom_info *cd = drive->driver_data; struct request_queue *q = drive->queue; int write = rq_data_dir(rq) == WRITE; unsigned short sectors_per_frame = queue_logical_block_size(q) >> SECTOR_BITS; ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " "secs_per_frame: %u", rq->cmd[0], rq->cmd_flags, sectors_per_frame); if (write) { /* disk has become write protected */ if (get_disk_ro(cd->disk)) return ide_stopped; } else { /* * We may be retrying this request after an error. Fix up any * weirdness which might be present in the request packet. 
*/ q->prep_rq_fn(q, rq); } /* fs requests *must* be hardware frame aligned */ if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) || (blk_rq_pos(rq) & (sectors_per_frame - 1))) return ide_stopped; /* use DMA, if possible */ drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); if (write) cd->devinfo.media_written = 1; rq->timeout = ATAPI_WAIT_PC; return ide_started; } static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) { ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", rq->cmd[0], rq->cmd_type); if (rq->cmd_type == REQ_TYPE_BLOCK_PC) rq->cmd_flags |= REQ_QUIET; else rq->cmd_flags &= ~REQ_FAILED; drive->dma = 0; /* sg request */ if (rq->bio) { struct request_queue *q = drive->queue; char *buf = bio_data(rq->bio); unsigned int alignment; drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); /* * check if dma is safe * * NOTE! The "len" and "addr" checks should possibly have * separate masks. */ alignment = queue_dma_alignment(q) | q->dma_pad_mask; if ((unsigned long)buf & alignment || blk_rq_bytes(rq) & q->dma_pad_mask || object_is_on_stack(buf)) drive->dma = 0; } } static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, sector_t block) { struct ide_cmd cmd; int uptodate = 0; unsigned int nsectors; ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu", rq->cmd[0], (unsigned long long)block); if (drive->debug_mask & IDE_DBG_RQ) blk_dump_rq_flags(rq, "ide_cd_do_request"); switch (rq->cmd_type) { case REQ_TYPE_FS: if (cdrom_start_rw(drive, rq) == ide_stopped) goto out_end; break; case REQ_TYPE_SENSE: case REQ_TYPE_BLOCK_PC: case REQ_TYPE_ATA_PC: if (!rq->timeout) rq->timeout = ATAPI_WAIT_PC; cdrom_do_block_pc(drive, rq); break; case REQ_TYPE_SPECIAL: /* right now this can only be a reset... */ uptodate = 1; goto out_end; default: BUG(); } /* prepare sense request for this command */ ide_prep_sense(drive, rq); memset(&cmd, 0, sizeof(cmd)); if (rq_data_dir(rq)) cmd.tf_flags |= IDE_TFLAG_WRITE; cmd.rq = rq; if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); ide_map_sg(drive, &cmd); } return ide_issue_pc(drive, &cmd); out_end: nsectors = blk_rq_sectors(rq); if (nsectors == 0) nsectors = 1; ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9); return ide_stopped; } /* * Ioctl handling. * * Routines which queue packet commands take as a final argument a pointer to a * request_sense struct. If execution of the command results in an error with a * CHECK CONDITION status, this structure will be filled with the results of the * subsequent request sense command. The pointer can also be NULL, in which case * no sense information is returned. */ static void msf_from_bcd(struct atapi_msf *msf) { msf->minute = bcd2bin(msf->minute); msf->second = bcd2bin(msf->second); msf->frame = bcd2bin(msf->frame); } int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) { struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *cdi = &info->devinfo; unsigned char cmd[BLK_MAX_CDB]; ide_debug_log(IDE_DBG_FUNC, "enter"); memset(cmd, 0, BLK_MAX_CDB); cmd[0] = GPCMD_TEST_UNIT_READY; /* * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to switch CDs * instead of supporting the LOAD_UNLOAD opcode. 
*/ cmd[7] = cdi->sanyo_slot % 3; return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET); } static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, unsigned long *sectors_per_frame, struct request_sense *sense) { struct { __be32 lba; __be32 blocklen; } capbuf; int stat; unsigned char cmd[BLK_MAX_CDB]; unsigned len = sizeof(capbuf); u32 blocklen; ide_debug_log(IDE_DBG_FUNC, "enter"); memset(cmd, 0, BLK_MAX_CDB); cmd[0] = GPCMD_READ_CDVD_CAPACITY; stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0, REQ_QUIET); if (stat) return stat; /* * Sanity check the given block size, in so far as making * sure the sectors_per_frame we give to the caller won't * end up being bogus. */ blocklen = be32_to_cpu(capbuf.blocklen); blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS; switch (blocklen) { case 512: case 1024: case 2048: case 4096: break; default: printk_once(KERN_ERR PFX "%s: weird block size %u; " "setting default block size to 2048\n", drive->name, blocklen); blocklen = 2048; break; } *capacity = 1 + be32_to_cpu(capbuf.lba); *sectors_per_frame = blocklen >> SECTOR_BITS; ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu", *capacity, *sectors_per_frame); return 0; } static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag, int format, char *buf, int buflen, struct request_sense *sense) { unsigned char cmd[BLK_MAX_CDB]; ide_debug_log(IDE_DBG_FUNC, "enter"); memset(cmd, 0, BLK_MAX_CDB); cmd[0] = GPCMD_READ_TOC_PMA_ATIP; cmd[6] = trackno; cmd[7] = (buflen >> 8); cmd[8] = (buflen & 0xff); cmd[9] = (format << 6); if (msf_flag) cmd[1] = 2; return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, REQ_QUIET); } /* Try to read the entire TOC for the disk into our internal buffer. */ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) { int stat, ntracks, i; struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *cdi = &info->devinfo; struct atapi_toc *toc = info->toc; struct { struct atapi_toc_header hdr; struct atapi_toc_entry ent; } ms_tmp; long last_written; unsigned long sectors_per_frame = SECTORS_PER_FRAME; ide_debug_log(IDE_DBG_FUNC, "enter"); if (toc == NULL) { /* try to allocate space */ toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL); if (toc == NULL) { printk(KERN_ERR PFX "%s: No cdrom TOC buffer!\n", drive->name); return -ENOMEM; } info->toc = toc; } /* * Check to see if the existing data is still valid. If it is, * just return. 
*/ (void) cdrom_check_status(drive, sense); if (drive->atapi_flags & IDE_AFLAG_TOC_VALID) return 0; /* try to get the total cdrom capacity and sector size */ stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame, sense); if (stat) toc->capacity = 0x1fffff; set_capacity(info->disk, toc->capacity * sectors_per_frame); /* save a private copy of the TOC capacity for error handling */ drive->probed_capacity = toc->capacity * sectors_per_frame; blk_queue_logical_block_size(drive->queue, sectors_per_frame << SECTOR_BITS); /* first read just the header, so we know how long the TOC is */ stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, sizeof(struct atapi_toc_header), sense); if (stat) return stat; if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { toc->hdr.first_track = bcd2bin(toc->hdr.first_track); toc->hdr.last_track = bcd2bin(toc->hdr.last_track); } ntracks = toc->hdr.last_track - toc->hdr.first_track + 1; if (ntracks <= 0) return -EIO; if (ntracks > MAX_TRACKS) ntracks = MAX_TRACKS; /* now read the whole schmeer */ stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0, (char *)&toc->hdr, sizeof(struct atapi_toc_header) + (ntracks + 1) * sizeof(struct atapi_toc_entry), sense); if (stat && toc->hdr.first_track > 1) { /* * Cds with CDI tracks only don't have any TOC entries, despite * of this the returned values are * first_track == last_track = number of CDI tracks + 1, * so that this case is indistinguishable from the same layout * plus an additional audio track. If we get an error for the * regular case, we assume a CDI without additional audio * tracks. In this case the readable TOC is empty (CDI tracks * are not included) and only holds the Leadout entry. * * Heiko Eißfeldt. */ ntracks = 0; stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0, (char *)&toc->hdr, sizeof(struct atapi_toc_header) + (ntracks + 1) * sizeof(struct atapi_toc_entry), sense); if (stat) return stat; if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { toc->hdr.first_track = (u8)bin2bcd(CDROM_LEADOUT); toc->hdr.last_track = (u8)bin2bcd(CDROM_LEADOUT); } else { toc->hdr.first_track = CDROM_LEADOUT; toc->hdr.last_track = CDROM_LEADOUT; } } if (stat) return stat; toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { toc->hdr.first_track = bcd2bin(toc->hdr.first_track); toc->hdr.last_track = bcd2bin(toc->hdr.last_track); } for (i = 0; i <= ntracks; i++) { if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) toc->ent[i].track = bcd2bin(toc->ent[i].track); msf_from_bcd(&toc->ent[i].addr.msf); } toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute, toc->ent[i].addr.msf.second, toc->ent[i].addr.msf.frame); } if (toc->hdr.first_track != CDROM_LEADOUT) { /* read the multisession information */ stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp, sizeof(ms_tmp), sense); if (stat) return stat; toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba); } else { ms_tmp.hdr.last_track = CDROM_LEADOUT; ms_tmp.hdr.first_track = ms_tmp.hdr.last_track; toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */ } if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { /* re-read multisession information using MSF format */ stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, sizeof(ms_tmp), sense); if (stat) return stat; msf_from_bcd(&ms_tmp.ent.addr.msf); toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute, ms_tmp.ent.addr.msf.second, 
ms_tmp.ent.addr.msf.frame); } toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track); /* now try to get the total cdrom capacity */ stat = cdrom_get_last_written(cdi, &last_written); if (!stat && (last_written > toc->capacity)) { toc->capacity = last_written; set_capacity(info->disk, toc->capacity * sectors_per_frame); drive->probed_capacity = toc->capacity * sectors_per_frame; } /* Remember that we've read this stuff. */ drive->atapi_flags |= IDE_AFLAG_TOC_VALID; return 0; } int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf) { struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *cdi = &info->devinfo; struct packet_command cgc; int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE; ide_debug_log(IDE_DBG_FUNC, "enter"); if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0) size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE; init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN); do { /* we seem to get stat=0x01,err=0x00 the first time (??) */ stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0); if (!stat) break; } while (--attempts); return stat; } void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf) { struct cdrom_info *cd = drive->driver_data; u16 curspeed, maxspeed; ide_debug_log(IDE_DBG_FUNC, "enter"); if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) { curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]); maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]); } else { curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]); maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]); } ide_debug_log(IDE_DBG_PROBE, "curspeed: %u, maxspeed: %u", curspeed, maxspeed); cd->current_speed = DIV_ROUND_CLOSEST(curspeed, 176); cd->max_speed = DIV_ROUND_CLOSEST(maxspeed, 176); } #define IDE_CD_CAPABILITIES \ (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | \ CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | \ CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R | \ CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET | \ CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM) static struct cdrom_device_ops ide_cdrom_dops = { .open = ide_cdrom_open_real, .release = ide_cdrom_release_real, .drive_status = ide_cdrom_drive_status, .check_events = ide_cdrom_check_events_real, .tray_move = ide_cdrom_tray_move, .lock_door = ide_cdrom_lock_door, .select_speed = ide_cdrom_select_speed, .get_last_session = ide_cdrom_get_last_session, .get_mcn = ide_cdrom_get_mcn, .reset = ide_cdrom_reset, .audio_ioctl = ide_cdrom_audio_ioctl, .capability = IDE_CD_CAPABILITIES, .generic_packet = ide_cdrom_packet, }; static int ide_cdrom_register(ide_drive_t *drive, int nslots) { struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *devinfo = &info->devinfo; ide_debug_log(IDE_DBG_PROBE, "nslots: %d", nslots); devinfo->ops = &ide_cdrom_dops; devinfo->speed = info->current_speed; devinfo->capacity = nslots; devinfo->handle = drive; strcpy(devinfo->name, drive->name); if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT) devinfo->mask |= CDC_SELECT_SPEED; devinfo->disk = info->disk; return register_cdrom(devinfo); } static int ide_cdrom_probe_capabilities(ide_drive_t *drive) { struct cdrom_info *cd = drive->driver_data; struct cdrom_device_info *cdi = &cd->devinfo; u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE]; mechtype_t mechtype; int nslots = 1; ide_debug_log(IDE_DBG_PROBE, "media: 0x%x, atapi_flags: 0x%lx", drive->media, drive->atapi_flags); cdi->mask = (CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_PLAY_AUDIO | 
CDC_MO_DRIVE | CDC_RAM); if (drive->media == ide_optical) { cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM); printk(KERN_ERR PFX "%s: ATAPI magneto-optical drive\n", drive->name); return nslots; } if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) { drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT; cdi->mask &= ~CDC_PLAY_AUDIO; return nslots; } /* * We have to cheat a little here. the packet will eventually be queued * with ide_cdrom_packet(), which extracts the drive from cdi->handle. * Since this device hasn't been registered with the Uniform layer yet, * it can't do this. Same goes for cdi->ops. */ cdi->handle = drive; cdi->ops = &ide_cdrom_dops; if (ide_cdrom_get_capabilities(drive, buf)) return 0; if ((buf[8 + 6] & 0x01) == 0) drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING; if (buf[8 + 6] & 0x08) drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT; if (buf[8 + 3] & 0x01) cdi->mask &= ~CDC_CD_R; if (buf[8 + 3] & 0x02) cdi->mask &= ~(CDC_CD_RW | CDC_RAM); if (buf[8 + 2] & 0x38) cdi->mask &= ~CDC_DVD; if (buf[8 + 3] & 0x20) cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM); if (buf[8 + 3] & 0x10) cdi->mask &= ~CDC_DVD_R; if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK)) cdi->mask &= ~CDC_PLAY_AUDIO; mechtype = buf[8 + 6] >> 5; if (mechtype == mechtype_caddy || mechtype == mechtype_popup || (drive->atapi_flags & IDE_AFLAG_NO_AUTOCLOSE)) cdi->mask |= CDC_CLOSE_TRAY; if (cdi->sanyo_slot > 0) { cdi->mask &= ~CDC_SELECT_DISC; nslots = 3; } else if (mechtype == mechtype_individual_changer || mechtype == mechtype_cartridge_changer) { nslots = cdrom_number_of_slots(cdi); if (nslots > 1) cdi->mask &= ~CDC_SELECT_DISC; } ide_cdrom_update_speed(drive, buf); printk(KERN_INFO PFX "%s: ATAPI", drive->name); /* don't print speed if the drive reported 0 */ if (cd->max_speed) printk(KERN_CONT " %dX", cd->max_speed); printk(KERN_CONT " %s", (cdi->mask & CDC_DVD) ? "CD-ROM" : "DVD-ROM"); if ((cdi->mask & CDC_DVD_R) == 0 || (cdi->mask & CDC_DVD_RAM) == 0) printk(KERN_CONT " DVD%s%s", (cdi->mask & CDC_DVD_R) ? "" : "-R", (cdi->mask & CDC_DVD_RAM) ? "" : "/RAM"); if ((cdi->mask & CDC_CD_R) == 0 || (cdi->mask & CDC_CD_RW) == 0) printk(KERN_CONT " CD%s%s", (cdi->mask & CDC_CD_R) ? "" : "-R", (cdi->mask & CDC_CD_RW) ? "" : "/RW"); if ((cdi->mask & CDC_SELECT_DISC) == 0) printk(KERN_CONT " changer w/%d slots", nslots); else printk(KERN_CONT " drive"); printk(KERN_CONT ", %dkB Cache\n", be16_to_cpup((__be16 *)&buf[8 + 12])); return nslots; } /* standard prep_rq_fn that builds 10 byte cmds */ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) { int hard_sect = queue_logical_block_size(q); long block = (long)blk_rq_pos(rq) / (hard_sect >> 9); unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); memset(rq->cmd, 0, BLK_MAX_CDB); if (rq_data_dir(rq) == READ) rq->cmd[0] = GPCMD_READ_10; else rq->cmd[0] = GPCMD_WRITE_10; /* * fill in lba */ rq->cmd[2] = (block >> 24) & 0xff; rq->cmd[3] = (block >> 16) & 0xff; rq->cmd[4] = (block >> 8) & 0xff; rq->cmd[5] = block & 0xff; /* * and transfer length */ rq->cmd[7] = (blocks >> 8) & 0xff; rq->cmd[8] = blocks & 0xff; rq->cmd_len = 10; return BLKPREP_OK; } /* * Most of the SCSI commands are supported directly by ATAPI devices. * This transform handles the few exceptions. 
*/ static int ide_cdrom_prep_pc(struct request *rq) { u8 *c = rq->cmd; /* transform 6-byte read/write commands to the 10-byte version */ if (c[0] == READ_6 || c[0] == WRITE_6) { c[8] = c[4]; c[5] = c[3]; c[4] = c[2]; c[3] = c[1] & 0x1f; c[2] = 0; c[1] &= 0xe0; c[0] += (READ_10 - READ_6); rq->cmd_len = 10; return BLKPREP_OK; } /* * it's silly to pretend we understand 6-byte sense commands, just * reject with ILLEGAL_REQUEST and the caller should take the * appropriate action */ if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) { rq->errors = ILLEGAL_REQUEST; return BLKPREP_KILL; } return BLKPREP_OK; } static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) { if (rq->cmd_type == REQ_TYPE_FS) return ide_cdrom_prep_fs(q, rq); else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) return ide_cdrom_prep_pc(rq); return 0; } struct cd_list_entry { const char *id_model; const char *id_firmware; unsigned int cd_flags; }; #ifdef CONFIG_IDE_PROC_FS static sector_t ide_cdrom_capacity(ide_drive_t *drive) { unsigned long capacity, sectors_per_frame; if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL)) return 0; return capacity * sectors_per_frame; } static int idecd_capacity_proc_show(struct seq_file *m, void *v) { ide_drive_t *drive = m->private; seq_printf(m, "%llu\n", (long long)ide_cdrom_capacity(drive)); return 0; } static int idecd_capacity_proc_open(struct inode *inode, struct file *file) { return single_open(file, idecd_capacity_proc_show, PDE(inode)->data); } static const struct file_operations idecd_capacity_proc_fops = { .owner = THIS_MODULE, .open = idecd_capacity_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static ide_proc_entry_t idecd_proc[] = { { "capacity", S_IFREG|S_IRUGO, &idecd_capacity_proc_fops }, {} }; static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive) { return idecd_proc; } static const struct ide_proc_devset *ide_cd_proc_devsets(ide_drive_t *drive) { return NULL; } #endif static const struct cd_list_entry ide_cd_quirks_list[] = { /* SCR-3231 doesn't support the SET_CD_SPEED command. */ { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_AFLAG_NO_SPEED_SELECT }, /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */ { "NEC CD-ROM DRIVE:260", "1.01", IDE_AFLAG_TOCADDR_AS_BCD | IDE_AFLAG_PRE_ATAPI12, }, /* Vertos 300, some versions of this drive like to talk BCD. */ { "V003S0DS", NULL, IDE_AFLAG_VERTOS_300_SSD, }, /* Vertos 600 ESD. */ { "V006E0DS", NULL, IDE_AFLAG_VERTOS_600_ESD, }, /* * Sanyo 3 CD changer uses a non-standard command for CD changing * (by default standard ATAPI support for CD changers is used). */ { "CD-ROM CDR-C3 G", NULL, IDE_AFLAG_SANYO_3CD }, { "CD-ROM CDR-C3G", NULL, IDE_AFLAG_SANYO_3CD }, { "CD-ROM CDR_C36", NULL, IDE_AFLAG_SANYO_3CD }, /* Stingray 8X CD-ROM. */ { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 }, /* * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length * mode sense page capabilities size, but older drives break. */ { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_AFLAG_FULL_CAPS_PAGE }, { "WPI CDS-32X", NULL, IDE_AFLAG_FULL_CAPS_PAGE }, /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */ { "", "241N", IDE_AFLAG_LE_SPEED_FIELDS }, /* * Some drives used by Apple don't advertise audio play * but they do support reading TOC & audio datas. 
*/ { "MATSHITADVD-ROM SR-8187", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "MATSHITADVD-ROM SR-8186", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "Optiarc DVD RW AD-7200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "Optiarc DVD RW AD-7543A", NULL, IDE_AFLAG_NO_AUTOCLOSE }, { "TEAC CD-ROM CD-224E", NULL, IDE_AFLAG_NO_AUTOCLOSE }, { NULL, NULL, 0 } }; static unsigned int ide_cd_flags(u16 *id) { const struct cd_list_entry *cle = ide_cd_quirks_list; while (cle->id_model) { if (strcmp(cle->id_model, (char *)&id[ATA_ID_PROD]) == 0 && (cle->id_firmware == NULL || strstr((char *)&id[ATA_ID_FW_REV], cle->id_firmware))) return cle->cd_flags; cle++; } return 0; } static int ide_cdrom_setup(ide_drive_t *drive) { struct cdrom_info *cd = drive->driver_data; struct cdrom_device_info *cdi = &cd->devinfo; struct request_queue *q = drive->queue; u16 *id = drive->id; char *fw_rev = (char *)&id[ATA_ID_FW_REV]; int nslots; ide_debug_log(IDE_DBG_PROBE, "enter"); blk_queue_prep_rq(q, ide_cdrom_prep_fn); blk_queue_dma_alignment(q, 31); blk_queue_update_dma_pad(q, 15); drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED; drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id); if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) && fw_rev[4] == '1' && fw_rev[6] <= '2') drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD | IDE_AFLAG_TOCADDR_AS_BCD); else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) && fw_rev[4] == '1' && fw_rev[6] <= '2') drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD; else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD) /* 3 => use CD in slot 0 */ cdi->sanyo_slot = 3; nslots = ide_cdrom_probe_capabilities(drive); blk_queue_logical_block_size(q, CD_FRAMESIZE); if (ide_cdrom_register(drive, nslots)) { printk(KERN_ERR PFX "%s: %s failed to register device with the" " cdrom driver.\n", drive->name, __func__); cd->devinfo.handle = NULL; return 1; } ide_proc_register_driver(drive, cd->driver); return 0; } static void ide_cd_remove(ide_drive_t *drive) { struct cdrom_info *info = drive->driver_data; ide_debug_log(IDE_DBG_FUNC, "enter"); ide_proc_unregister_driver(drive, info->driver); device_del(&info->dev); del_gendisk(info->disk); mutex_lock(&idecd_ref_mutex); put_device(&info->dev); mutex_unlock(&idecd_ref_mutex); } static void ide_cd_release(struct device *dev) { struct cdrom_info *info = to_ide_drv(dev, cdrom_info); struct cdrom_device_info *devinfo = &info->devinfo; ide_drive_t *drive = info->drive; struct gendisk *g = info->disk; ide_debug_log(IDE_DBG_FUNC, "enter"); kfree(info->toc); if (devinfo->handle == drive) unregister_cdrom(devinfo); drive->driver_data = NULL; blk_queue_prep_rq(drive->queue, NULL); g->private_data = NULL; put_disk(g); kfree(info); } static int ide_cd_probe(ide_drive_t *); static struct ide_driver ide_cdrom_driver = { .gen_driver = { .owner = THIS_MODULE, .name = "ide-cdrom", .bus = &ide_bus_type, }, .probe = ide_cd_probe, .remove = ide_cd_remove, .version = IDECD_VERSION, .do_request = ide_cd_do_request, #ifdef CONFIG_IDE_PROC_FS .proc_entries = ide_cd_proc_entries, .proc_devsets = ide_cd_proc_devsets, #endif }; static int idecd_open(struct block_device *bdev, fmode_t mode) { struct cdrom_info *info; int rc = -ENXIO; mutex_lock(&ide_cd_mutex); info = ide_cd_get(bdev->bd_disk); if (!info) goto out; rc = cdrom_open(&info->devinfo, bdev, mode); if (rc < 0) ide_cd_put(info); out: mutex_unlock(&ide_cd_mutex); return rc; } 
static int idecd_release(struct gendisk *disk, fmode_t mode) { struct cdrom_info *info = ide_drv_g(disk, cdrom_info); mutex_lock(&ide_cd_mutex); cdrom_release(&info->devinfo, mode); ide_cd_put(info); mutex_unlock(&ide_cd_mutex); return 0; } static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg) { struct packet_command cgc; char buffer[16]; int stat; char spindown; if (copy_from_user(&spindown, (void __user *)arg, sizeof(char))) return -EFAULT; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0); if (stat) return stat; buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f); return cdrom_mode_select(cdi, &cgc); } static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg) { struct packet_command cgc; char buffer[16]; int stat; char spindown; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0); if (stat) return stat; spindown = buffer[11] & 0x0f; if (copy_to_user((void __user *)arg, &spindown, sizeof(char))) return -EFAULT; return 0; } static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info); int err; switch (cmd) { case CDROMSETSPINDOWN: return idecd_set_spindown(&info->devinfo, arg); case CDROMGETSPINDOWN: return idecd_get_spindown(&info->devinfo, arg); default: break; } err = generic_ide_ioctl(info->drive, bdev, cmd, arg); if (err == -EINVAL) err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd, arg); return err; } static int idecd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&ide_cd_mutex); ret = idecd_locked_ioctl(bdev, mode, cmd, arg); mutex_unlock(&ide_cd_mutex); return ret; } static unsigned int idecd_check_events(struct gendisk *disk, unsigned int clearing) { struct cdrom_info *info = ide_drv_g(disk, cdrom_info); return cdrom_check_events(&info->devinfo, clearing); } static int idecd_revalidate_disk(struct gendisk *disk) { struct cdrom_info *info = ide_drv_g(disk, cdrom_info); struct request_sense sense; ide_cd_read_toc(info->drive, &sense); return 0; } static const struct block_device_operations idecd_ops = { .owner = THIS_MODULE, .open = idecd_open, .release = idecd_release, .ioctl = idecd_ioctl, .check_events = idecd_check_events, .revalidate_disk = idecd_revalidate_disk }; /* module options */ static unsigned long debug_mask; module_param(debug_mask, ulong, 0644); MODULE_DESCRIPTION("ATAPI CD-ROM Driver"); static int ide_cd_probe(ide_drive_t *drive) { struct cdrom_info *info; struct gendisk *g; struct request_sense sense; ide_debug_log(IDE_DBG_PROBE, "driver_req: %s, media: 0x%x", drive->driver_req, drive->media); if (!strstr("ide-cdrom", drive->driver_req)) goto failed; if (drive->media != ide_cdrom && drive->media != ide_optical) goto failed; drive->debug_mask = debug_mask; drive->irq_handler = cdrom_newpc_intr; info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL); if (info == NULL) { printk(KERN_ERR PFX "%s: Can't allocate a cdrom structure\n", drive->name); goto failed; } g = alloc_disk(1 << PARTN_BITS); if (!g) goto out_free_cd; ide_init_disk(g, drive); info->dev.parent = &drive->gendev; info->dev.release = ide_cd_release; dev_set_name(&info->dev, dev_name(&drive->gendev)); if (device_register(&info->dev)) goto out_free_disk; info->drive = drive; info->driver = &ide_cdrom_driver; info->disk = g; g->private_data = 
&info->driver; drive->driver_data = info; g->minors = 1; g->driverfs_dev = &drive->gendev; g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE; if (ide_cdrom_setup(drive)) { put_device(&info->dev); goto failed; } ide_cd_read_toc(drive, &sense); g->fops = &idecd_ops; g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; add_disk(g); return 0; out_free_disk: put_disk(g); out_free_cd: kfree(info); failed: return -ENODEV; } static void __exit ide_cdrom_exit(void) { driver_unregister(&ide_cdrom_driver.gen_driver); } static int __init ide_cdrom_init(void) { printk(KERN_INFO DRV_NAME " driver " IDECD_VERSION "\n"); return driver_register(&ide_cdrom_driver.gen_driver); } MODULE_ALIAS("ide:*m-cdrom*"); MODULE_ALIAS("ide-cd"); module_init(ide_cdrom_init); module_exit(ide_cdrom_exit); MODULE_LICENSE("GPL");
gpl-2.0
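The prep_rq_fn shown in the record above (ide_cdrom_prep_fs) packs a frame-aligned filesystem request into a 10-byte READ(10)/WRITE(10) packet command. What follows is a hedged, self-contained userspace sketch of that byte layout, not driver code: the GPCMD_READ_10/GPCMD_WRITE_10 opcode values (0x28/0x2a) are the standard ATAPI ones, and the helper name pack_rw10 is invented purely for illustration.

/*
 * Illustrative sketch only: pack an LBA and a block count into a
 * 10-byte READ(10)/WRITE(10) CDB, mirroring the byte layout used by
 * ide_cdrom_prep_fs() in the file above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GPCMD_READ_10  0x28
#define GPCMD_WRITE_10 0x2a

static void pack_rw10(uint8_t cdb[10], int is_write,
                      uint32_t block, uint16_t blocks)
{
    memset(cdb, 0, 10);
    cdb[0] = is_write ? GPCMD_WRITE_10 : GPCMD_READ_10;
    /* bytes 2..5: logical block address, big endian */
    cdb[2] = (block >> 24) & 0xff;
    cdb[3] = (block >> 16) & 0xff;
    cdb[4] = (block >> 8) & 0xff;
    cdb[5] = block & 0xff;
    /* bytes 7..8: transfer length in blocks, big endian */
    cdb[7] = (blocks >> 8) & 0xff;
    cdb[8] = blocks & 0xff;
}

int main(void)
{
    uint8_t cdb[10];
    int i;

    pack_rw10(cdb, 0, 0x12345, 16);   /* read 16 frames at LBA 0x12345 */
    for (i = 0; i < 10; i++)
        printf("%02x ", cdb[i]);
    printf("\n");
    return 0;
}

The LBA and transfer length are big endian on the wire, which is why the driver shifts the block number out a byte at a time rather than storing it directly.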
HighwindONE/android_kernel_motorola_msm8226
drivers/ide/ide-cd.c
5370
46958
/* * ATAPI CD-ROM driver. * * Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov> * Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org> * Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de> * Copyright (C) 2005, 2007-2009 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. * * See Documentation/cdrom/ide-cd for usage information. * * Suggestions are welcome. Patches that work are more welcome though. ;-) * * Documentation: * Mt. Fuji (SFF8090 version 4) and ATAPI (SFF-8020i rev 2.6) standards. * * For historical changelog please see: * Documentation/ide/ChangeLog.ide-cd.1994-2004 */ #define DRV_NAME "ide-cd" #define PFX DRV_NAME ": " #define IDECD_VERSION "5.00" #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/cdrom.h> #include <linux/ide.h> #include <linux/completion.h> #include <linux/mutex.h> #include <linux/bcd.h> /* For SCSI -> ATAPI command conversion */ #include <scsi/scsi.h> #include <linux/io.h> #include <asm/byteorder.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include "ide-cd.h" static DEFINE_MUTEX(ide_cd_mutex); static DEFINE_MUTEX(idecd_ref_mutex); static void ide_cd_release(struct device *); static struct cdrom_info *ide_cd_get(struct gendisk *disk) { struct cdrom_info *cd = NULL; mutex_lock(&idecd_ref_mutex); cd = ide_drv_g(disk, cdrom_info); if (cd) { if (ide_device_get(cd->drive)) cd = NULL; else get_device(&cd->dev); } mutex_unlock(&idecd_ref_mutex); return cd; } static void ide_cd_put(struct cdrom_info *cd) { ide_drive_t *drive = cd->drive; mutex_lock(&idecd_ref_mutex); put_device(&cd->dev); ide_device_put(drive); mutex_unlock(&idecd_ref_mutex); } /* * Generic packet command support and error handling routines. */ /* Mark that we've seen a media change and invalidate our internal buffers. */ static void cdrom_saw_media_change(ide_drive_t *drive) { drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED; drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID; } static int cdrom_log_sense(ide_drive_t *drive, struct request *rq) { struct request_sense *sense = &drive->sense_data; int log = 0; if (!sense || !rq || (rq->cmd_flags & REQ_QUIET)) return 0; ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key); switch (sense->sense_key) { case NO_SENSE: case RECOVERED_ERROR: break; case NOT_READY: /* * don't care about tray state messages for e.g. capacity * commands or in-progress or becoming ready */ if (sense->asc == 0x3a || sense->asc == 0x04) break; log = 1; break; case ILLEGAL_REQUEST: /* * don't log START_STOP unit with LoEj set, since we cannot * reliably check if drive can auto-close */ if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) break; log = 1; break; case UNIT_ATTENTION: /* * Make good and sure we've seen this potential media change. * Some drives (i.e. Creative) fail to present the correct sense * key in the error register. 
*/ cdrom_saw_media_change(drive); break; default: log = 1; break; } return log; } static void cdrom_analyze_sense_data(ide_drive_t *drive, struct request *failed_command) { struct request_sense *sense = &drive->sense_data; struct cdrom_info *info = drive->driver_data; unsigned long sector; unsigned long bio_sectors; ide_debug_log(IDE_DBG_SENSE, "error_code: 0x%x, sense_key: 0x%x", sense->error_code, sense->sense_key); if (failed_command) ide_debug_log(IDE_DBG_SENSE, "failed cmd: 0x%x", failed_command->cmd[0]); if (!cdrom_log_sense(drive, failed_command)) return; /* * If a read toc is executed for a CD-R or CD-RW medium where the first * toc has not been recorded yet, it will fail with 05/24/00 (which is a * confusing error) */ if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP) if (sense->sense_key == 0x05 && sense->asc == 0x24) return; /* current error */ if (sense->error_code == 0x70) { switch (sense->sense_key) { case MEDIUM_ERROR: case VOLUME_OVERFLOW: case ILLEGAL_REQUEST: if (!sense->valid) break; if (failed_command == NULL || failed_command->cmd_type != REQ_TYPE_FS) break; sector = (sense->information[0] << 24) | (sense->information[1] << 16) | (sense->information[2] << 8) | (sense->information[3]); if (queue_logical_block_size(drive->queue) == 2048) /* device sector size is 2K */ sector <<= 2; bio_sectors = max(bio_sectors(failed_command->bio), 4U); sector &= ~(bio_sectors - 1); /* * The SCSI specification allows for the value * returned by READ CAPACITY to be up to 75 2K * sectors past the last readable block. * Therefore, if we hit a medium error within the * last 75 2K sectors, we decrease the saved size * value. */ if (sector < get_capacity(info->disk) && drive->probed_capacity - sector < 4 * 75) set_capacity(info->disk, sector); } } ide_cd_log_error(drive->name, failed_command, sense); } static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) { /* * For REQ_TYPE_SENSE, "rq->special" points to the original * failed request. Also, the sense data should be read * directly from rq which might be different from the original * sense buffer if it got copied during mapping. */ struct request *failed = (struct request *)rq->special; void *sense = bio_data(rq->bio); if (failed) { if (failed->sense) { /* * Sense is always read into drive->sense_data. * Copy back if the failed request has its * sense pointer set. */ memcpy(failed->sense, sense, 18); failed->sense_len = rq->sense_len; } cdrom_analyze_sense_data(drive, failed); if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed))) BUG(); } else cdrom_analyze_sense_data(drive, NULL); } /* * Allow the drive 5 seconds to recover; some devices will return NOT_READY * while flushing data from cache. * * returns: 0 failed (write timeout expired) * 1 success */ static int ide_cd_breathe(ide_drive_t *drive, struct request *rq) { struct cdrom_info *info = drive->driver_data; if (!rq->errors) info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY; rq->errors = 1; if (time_after(jiffies, info->write_timeout)) return 0; else { /* * take a breather */ blk_delay_queue(drive->queue, 1); return 1; } } /** * Returns: * 0: if the request should be continued. * 1: if the request will be going through error recovery. * 2: if the request should be ended. 
*/ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) { ide_hwif_t *hwif = drive->hwif; struct request *rq = hwif->rq; int err, sense_key, do_end_request = 0; /* get the IDE error register */ err = ide_read_error(drive); sense_key = err >> 4; ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, rq->cmd_type: 0x%x, err: 0x%x, " "stat 0x%x", rq->cmd[0], rq->cmd_type, err, stat); if (rq->cmd_type == REQ_TYPE_SENSE) { /* * We got an error trying to get sense info from the drive * (probably while trying to recover from a former error). * Just give up. */ rq->cmd_flags |= REQ_FAILED; return 2; } /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors) rq->errors = SAM_STAT_CHECK_CONDITION; if (blk_noretry_request(rq)) do_end_request = 1; switch (sense_key) { case NOT_READY: if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) { if (ide_cd_breathe(drive, rq)) return 1; } else { cdrom_saw_media_change(drive); if (rq->cmd_type == REQ_TYPE_FS && !(rq->cmd_flags & REQ_QUIET)) printk(KERN_ERR PFX "%s: tray open\n", drive->name); } do_end_request = 1; break; case UNIT_ATTENTION: cdrom_saw_media_change(drive); if (rq->cmd_type != REQ_TYPE_FS) return 0; /* * Arrange to retry the request but be sure to give up if we've * retried too many times. */ if (++rq->errors > ERROR_MAX) do_end_request = 1; break; case ILLEGAL_REQUEST: /* * Don't print error message for this condition -- SFF8090i * indicates that 5/24/00 is the correct response to a request * to close the tray if the drive doesn't have that capability. * * cdrom_log_sense() knows this! */ if (rq->cmd[0] == GPCMD_START_STOP_UNIT) break; /* fall-through */ case DATA_PROTECT: /* * No point in retrying after an illegal request or data * protect error. */ if (!(rq->cmd_flags & REQ_QUIET)) ide_dump_status(drive, "command error", stat); do_end_request = 1; break; case MEDIUM_ERROR: /* * No point in re-trying a zillion times on a bad sector. * If we got here the error is not correctable. */ if (!(rq->cmd_flags & REQ_QUIET)) ide_dump_status(drive, "media error " "(bad sector)", stat); do_end_request = 1; break; case BLANK_CHECK: /* disk appears blank? */ if (!(rq->cmd_flags & REQ_QUIET)) ide_dump_status(drive, "media error (blank)", stat); do_end_request = 1; break; default: if (rq->cmd_type != REQ_TYPE_FS) break; if (err & ~ATA_ABORTED) { /* go to the default handler for other errors */ ide_error(drive, "cdrom_decode_status", stat); return 1; } else if (++rq->errors > ERROR_MAX) /* we've racked up too many retries, abort */ do_end_request = 1; } if (rq->cmd_type != REQ_TYPE_FS) { rq->cmd_flags |= REQ_FAILED; do_end_request = 1; } /* * End a request through request sense analysis when we have sense data. * We need this in order to perform end of media processing. */ if (do_end_request) goto end_request; /* if we got a CHECK_CONDITION status, queue a request sense command */ if (stat & ATA_ERR) return ide_queue_sense_rq(drive, NULL) ? 2 : 1; return 1; end_request: if (stat & ATA_ERR) { hwif->rq = NULL; return ide_queue_sense_rq(drive, rq) ? 2 : 1; } else return 2; } static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd) { struct request *rq = cmd->rq; ide_debug_log(IDE_DBG_FUNC, "rq->cmd[0]: 0x%x", rq->cmd[0]); /* * Some of the trailing request sense fields are optional, * and some drives don't send them. Sigh. 
*/ if (rq->cmd[0] == GPCMD_REQUEST_SENSE && cmd->nleft > 0 && cmd->nleft <= 5) cmd->nleft = 0; } int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, int write, void *buffer, unsigned *bufflen, struct request_sense *sense, int timeout, unsigned int cmd_flags) { struct cdrom_info *info = drive->driver_data; struct request_sense local_sense; int retries = 10; unsigned int flags = 0; if (!sense) sense = &local_sense; ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, " "cmd_flags: 0x%x", cmd[0], write, timeout, cmd_flags); /* start of retry loop */ do { struct request *rq; int error; rq = blk_get_request(drive->queue, write, __GFP_WAIT); memcpy(rq->cmd, cmd, BLK_MAX_CDB); rq->cmd_type = REQ_TYPE_ATA_PC; rq->sense = sense; rq->cmd_flags |= cmd_flags; rq->timeout = timeout; if (buffer) { error = blk_rq_map_kern(drive->queue, rq, buffer, *bufflen, GFP_NOIO); if (error) { blk_put_request(rq); return error; } } error = blk_execute_rq(drive->queue, info->disk, rq, 0); if (buffer) *bufflen = rq->resid_len; flags = rq->cmd_flags; blk_put_request(rq); /* * FIXME: we should probably abort/retry or something in case of * failure. */ if (flags & REQ_FAILED) { /* * The request failed. Retry if it was due to a unit * attention status (usually means media was changed). */ struct request_sense *reqbuf = sense; if (reqbuf->sense_key == UNIT_ATTENTION) cdrom_saw_media_change(drive); else if (reqbuf->sense_key == NOT_READY && reqbuf->asc == 4 && reqbuf->ascq != 4) { /* * The drive is in the process of loading * a disk. Retry, but wait a little to give * the drive time to complete the load. */ ssleep(2); } else { /* otherwise, don't retry */ retries = 0; } --retries; } /* end of retry loop */ } while ((flags & REQ_FAILED) && retries >= 0); /* return an error if the command failed */ return (flags & REQ_FAILED) ? -EIO : 0; } /* * returns true if rq has been completed */ static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd) { unsigned int nr_bytes = cmd->nbytes - cmd->nleft; if (cmd->tf_flags & IDE_TFLAG_WRITE) nr_bytes -= cmd->last_xfer_len; if (nr_bytes > 0) { ide_complete_rq(drive, 0, nr_bytes); return true; } return false; } static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct ide_cmd *cmd = &hwif->cmd; struct request *rq = hwif->rq; ide_expiry_t *expiry = NULL; int dma_error = 0, dma, thislen, uptodate = 0; int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; int sense = (rq->cmd_type == REQ_TYPE_SENSE); unsigned int timeout; u16 len; u8 ireason, stat; ide_debug_log(IDE_DBG_PC, "cmd: 0x%x, write: 0x%x", rq->cmd[0], write); /* check for errors */ dma = drive->dma; if (dma) { drive->dma = 0; drive->waiting_for_dma = 0; dma_error = hwif->dma_ops->dma_end(drive); ide_dma_unmap_sg(drive, cmd); if (dma_error) { printk(KERN_ERR PFX "%s: DMA %s error\n", drive->name, write ? "write" : "read"); ide_dma_off(drive); } } /* check status */ stat = hwif->tp_ops->read_status(hwif); if (!OK_STAT(stat, 0, BAD_R_STAT)) { rc = cdrom_decode_status(drive, stat); if (rc) { if (rc == 2) goto out_end; return ide_stopped; } } /* using dma, transfer is complete now */ if (dma) { if (dma_error) return ide_error(drive, "dma error", stat); uptodate = 1; goto out_end; } ide_read_bcount_and_ireason(drive, &len, &ireason); thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft; if (thislen > len) thislen = len; ide_debug_log(IDE_DBG_PC, "DRQ: stat: 0x%x, thislen: %d", stat, thislen); /* If DRQ is clear, the command has completed. 
*/ if ((stat & ATA_DRQ) == 0) { if (rq->cmd_type == REQ_TYPE_FS) { /* * If we're not done reading/writing, complain. * Otherwise, complete the command normally. */ uptodate = 1; if (cmd->nleft > 0) { printk(KERN_ERR PFX "%s: %s: data underrun " "(%u bytes)\n", drive->name, __func__, cmd->nleft); if (!write) rq->cmd_flags |= REQ_FAILED; uptodate = 0; } } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { ide_cd_request_sense_fixup(drive, cmd); uptodate = cmd->nleft ? 0 : 1; /* * suck out the remaining bytes from the drive in an * attempt to complete the data xfer. (see BZ#13399) */ if (!(stat & ATA_ERR) && !uptodate && thislen) { ide_pio_bytes(drive, cmd, write, thislen); uptodate = cmd->nleft ? 0 : 1; } if (!uptodate) rq->cmd_flags |= REQ_FAILED; } goto out_end; } rc = ide_check_ireason(drive, rq, len, ireason, write); if (rc) goto out_end; cmd->last_xfer_len = 0; ide_debug_log(IDE_DBG_PC, "data transfer, rq->cmd_type: 0x%x, " "ireason: 0x%x", rq->cmd_type, ireason); /* transfer data */ while (thislen > 0) { int blen = min_t(int, thislen, cmd->nleft); if (cmd->nleft == 0) break; ide_pio_bytes(drive, cmd, write, blen); cmd->last_xfer_len += blen; thislen -= blen; len -= blen; if (sense && write == 0) rq->sense_len += blen; } /* pad, if necessary */ if (len > 0) { if (rq->cmd_type != REQ_TYPE_FS || write == 0) ide_pad_transfer(drive, write, len); else { printk(KERN_ERR PFX "%s: confused, missing data\n", drive->name); blk_dump_rq_flags(rq, "cdrom_newpc_intr"); } } if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { timeout = rq->timeout; } else { timeout = ATAPI_WAIT_PC; if (rq->cmd_type != REQ_TYPE_FS) expiry = ide_cd_expiry; } hwif->expiry = expiry; ide_set_handler(drive, cdrom_newpc_intr, timeout); return ide_started; out_end: if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) { rq->resid_len = 0; blk_end_request_all(rq, 0); hwif->rq = NULL; } else { if (sense && uptodate) ide_cd_complete_failed_rq(drive, rq); if (rq->cmd_type == REQ_TYPE_FS) { if (cmd->nleft == 0) uptodate = 1; } else { if (uptodate <= 0 && rq->errors == 0) rq->errors = -EIO; } if (uptodate == 0 && rq->bio) if (ide_cd_error_cmd(drive, cmd)) return ide_stopped; /* make sure it's fully ended */ if (rq->cmd_type != REQ_TYPE_FS) { rq->resid_len -= cmd->nbytes - cmd->nleft; if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) rq->resid_len += cmd->last_xfer_len; } ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq)); if (sense && rc == 2) ide_error(drive, "request sense failure", stat); } return ide_stopped; } static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) { struct cdrom_info *cd = drive->driver_data; struct request_queue *q = drive->queue; int write = rq_data_dir(rq) == WRITE; unsigned short sectors_per_frame = queue_logical_block_size(q) >> SECTOR_BITS; ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " "secs_per_frame: %u", rq->cmd[0], rq->cmd_flags, sectors_per_frame); if (write) { /* disk has become write protected */ if (get_disk_ro(cd->disk)) return ide_stopped; } else { /* * We may be retrying this request after an error. Fix up any * weirdness which might be present in the request packet. 
*/ q->prep_rq_fn(q, rq); } /* fs requests *must* be hardware frame aligned */ if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) || (blk_rq_pos(rq) & (sectors_per_frame - 1))) return ide_stopped; /* use DMA, if possible */ drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); if (write) cd->devinfo.media_written = 1; rq->timeout = ATAPI_WAIT_PC; return ide_started; } static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) { ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", rq->cmd[0], rq->cmd_type); if (rq->cmd_type == REQ_TYPE_BLOCK_PC) rq->cmd_flags |= REQ_QUIET; else rq->cmd_flags &= ~REQ_FAILED; drive->dma = 0; /* sg request */ if (rq->bio) { struct request_queue *q = drive->queue; char *buf = bio_data(rq->bio); unsigned int alignment; drive->dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); /* * check if dma is safe * * NOTE! The "len" and "addr" checks should possibly have * separate masks. */ alignment = queue_dma_alignment(q) | q->dma_pad_mask; if ((unsigned long)buf & alignment || blk_rq_bytes(rq) & q->dma_pad_mask || object_is_on_stack(buf)) drive->dma = 0; } } static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, sector_t block) { struct ide_cmd cmd; int uptodate = 0; unsigned int nsectors; ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu", rq->cmd[0], (unsigned long long)block); if (drive->debug_mask & IDE_DBG_RQ) blk_dump_rq_flags(rq, "ide_cd_do_request"); switch (rq->cmd_type) { case REQ_TYPE_FS: if (cdrom_start_rw(drive, rq) == ide_stopped) goto out_end; break; case REQ_TYPE_SENSE: case REQ_TYPE_BLOCK_PC: case REQ_TYPE_ATA_PC: if (!rq->timeout) rq->timeout = ATAPI_WAIT_PC; cdrom_do_block_pc(drive, rq); break; case REQ_TYPE_SPECIAL: /* right now this can only be a reset... */ uptodate = 1; goto out_end; default: BUG(); } /* prepare sense request for this command */ ide_prep_sense(drive, rq); memset(&cmd, 0, sizeof(cmd)); if (rq_data_dir(rq)) cmd.tf_flags |= IDE_TFLAG_WRITE; cmd.rq = rq; if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); ide_map_sg(drive, &cmd); } return ide_issue_pc(drive, &cmd); out_end: nsectors = blk_rq_sectors(rq); if (nsectors == 0) nsectors = 1; ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9); return ide_stopped; } /* * Ioctl handling. * * Routines which queue packet commands take as a final argument a pointer to a * request_sense struct. If execution of the command results in an error with a * CHECK CONDITION status, this structure will be filled with the results of the * subsequent request sense command. The pointer can also be NULL, in which case * no sense information is returned. */ static void msf_from_bcd(struct atapi_msf *msf) { msf->minute = bcd2bin(msf->minute); msf->second = bcd2bin(msf->second); msf->frame = bcd2bin(msf->frame); } int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) { struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *cdi = &info->devinfo; unsigned char cmd[BLK_MAX_CDB]; ide_debug_log(IDE_DBG_FUNC, "enter"); memset(cmd, 0, BLK_MAX_CDB); cmd[0] = GPCMD_TEST_UNIT_READY; /* * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to switch CDs * instead of supporting the LOAD_UNLOAD opcode. 
*/ cmd[7] = cdi->sanyo_slot % 3; return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET); } static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, unsigned long *sectors_per_frame, struct request_sense *sense) { struct { __be32 lba; __be32 blocklen; } capbuf; int stat; unsigned char cmd[BLK_MAX_CDB]; unsigned len = sizeof(capbuf); u32 blocklen; ide_debug_log(IDE_DBG_FUNC, "enter"); memset(cmd, 0, BLK_MAX_CDB); cmd[0] = GPCMD_READ_CDVD_CAPACITY; stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0, REQ_QUIET); if (stat) return stat; /* * Sanity check the given block size, in so far as making * sure the sectors_per_frame we give to the caller won't * end up being bogus. */ blocklen = be32_to_cpu(capbuf.blocklen); blocklen = (blocklen >> SECTOR_BITS) << SECTOR_BITS; switch (blocklen) { case 512: case 1024: case 2048: case 4096: break; default: printk_once(KERN_ERR PFX "%s: weird block size %u; " "setting default block size to 2048\n", drive->name, blocklen); blocklen = 2048; break; } *capacity = 1 + be32_to_cpu(capbuf.lba); *sectors_per_frame = blocklen >> SECTOR_BITS; ide_debug_log(IDE_DBG_PROBE, "cap: %lu, sectors_per_frame: %lu", *capacity, *sectors_per_frame); return 0; } static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag, int format, char *buf, int buflen, struct request_sense *sense) { unsigned char cmd[BLK_MAX_CDB]; ide_debug_log(IDE_DBG_FUNC, "enter"); memset(cmd, 0, BLK_MAX_CDB); cmd[0] = GPCMD_READ_TOC_PMA_ATIP; cmd[6] = trackno; cmd[7] = (buflen >> 8); cmd[8] = (buflen & 0xff); cmd[9] = (format << 6); if (msf_flag) cmd[1] = 2; return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, REQ_QUIET); } /* Try to read the entire TOC for the disk into our internal buffer. */ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) { int stat, ntracks, i; struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *cdi = &info->devinfo; struct atapi_toc *toc = info->toc; struct { struct atapi_toc_header hdr; struct atapi_toc_entry ent; } ms_tmp; long last_written; unsigned long sectors_per_frame = SECTORS_PER_FRAME; ide_debug_log(IDE_DBG_FUNC, "enter"); if (toc == NULL) { /* try to allocate space */ toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL); if (toc == NULL) { printk(KERN_ERR PFX "%s: No cdrom TOC buffer!\n", drive->name); return -ENOMEM; } info->toc = toc; } /* * Check to see if the existing data is still valid. If it is, * just return. 
*/ (void) cdrom_check_status(drive, sense); if (drive->atapi_flags & IDE_AFLAG_TOC_VALID) return 0; /* try to get the total cdrom capacity and sector size */ stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame, sense); if (stat) toc->capacity = 0x1fffff; set_capacity(info->disk, toc->capacity * sectors_per_frame); /* save a private copy of the TOC capacity for error handling */ drive->probed_capacity = toc->capacity * sectors_per_frame; blk_queue_logical_block_size(drive->queue, sectors_per_frame << SECTOR_BITS); /* first read just the header, so we know how long the TOC is */ stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, sizeof(struct atapi_toc_header), sense); if (stat) return stat; if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { toc->hdr.first_track = bcd2bin(toc->hdr.first_track); toc->hdr.last_track = bcd2bin(toc->hdr.last_track); } ntracks = toc->hdr.last_track - toc->hdr.first_track + 1; if (ntracks <= 0) return -EIO; if (ntracks > MAX_TRACKS) ntracks = MAX_TRACKS; /* now read the whole schmeer */ stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0, (char *)&toc->hdr, sizeof(struct atapi_toc_header) + (ntracks + 1) * sizeof(struct atapi_toc_entry), sense); if (stat && toc->hdr.first_track > 1) { /* * Cds with CDI tracks only don't have any TOC entries, despite * of this the returned values are * first_track == last_track = number of CDI tracks + 1, * so that this case is indistinguishable from the same layout * plus an additional audio track. If we get an error for the * regular case, we assume a CDI without additional audio * tracks. In this case the readable TOC is empty (CDI tracks * are not included) and only holds the Leadout entry. * * Heiko Eißfeldt. */ ntracks = 0; stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0, (char *)&toc->hdr, sizeof(struct atapi_toc_header) + (ntracks + 1) * sizeof(struct atapi_toc_entry), sense); if (stat) return stat; if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { toc->hdr.first_track = (u8)bin2bcd(CDROM_LEADOUT); toc->hdr.last_track = (u8)bin2bcd(CDROM_LEADOUT); } else { toc->hdr.first_track = CDROM_LEADOUT; toc->hdr.last_track = CDROM_LEADOUT; } } if (stat) return stat; toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { toc->hdr.first_track = bcd2bin(toc->hdr.first_track); toc->hdr.last_track = bcd2bin(toc->hdr.last_track); } for (i = 0; i <= ntracks; i++) { if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) toc->ent[i].track = bcd2bin(toc->ent[i].track); msf_from_bcd(&toc->ent[i].addr.msf); } toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute, toc->ent[i].addr.msf.second, toc->ent[i].addr.msf.frame); } if (toc->hdr.first_track != CDROM_LEADOUT) { /* read the multisession information */ stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp, sizeof(ms_tmp), sense); if (stat) return stat; toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba); } else { ms_tmp.hdr.last_track = CDROM_LEADOUT; ms_tmp.hdr.first_track = ms_tmp.hdr.last_track; toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */ } if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { /* re-read multisession information using MSF format */ stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, sizeof(ms_tmp), sense); if (stat) return stat; msf_from_bcd(&ms_tmp.ent.addr.msf); toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute, ms_tmp.ent.addr.msf.second, 
ms_tmp.ent.addr.msf.frame); } toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track); /* now try to get the total cdrom capacity */ stat = cdrom_get_last_written(cdi, &last_written); if (!stat && (last_written > toc->capacity)) { toc->capacity = last_written; set_capacity(info->disk, toc->capacity * sectors_per_frame); drive->probed_capacity = toc->capacity * sectors_per_frame; } /* Remember that we've read this stuff. */ drive->atapi_flags |= IDE_AFLAG_TOC_VALID; return 0; } int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf) { struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *cdi = &info->devinfo; struct packet_command cgc; int stat, attempts = 3, size = ATAPI_CAPABILITIES_PAGE_SIZE; ide_debug_log(IDE_DBG_FUNC, "enter"); if ((drive->atapi_flags & IDE_AFLAG_FULL_CAPS_PAGE) == 0) size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE; init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN); do { /* we seem to get stat=0x01,err=0x00 the first time (??) */ stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0); if (!stat) break; } while (--attempts); return stat; } void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf) { struct cdrom_info *cd = drive->driver_data; u16 curspeed, maxspeed; ide_debug_log(IDE_DBG_FUNC, "enter"); if (drive->atapi_flags & IDE_AFLAG_LE_SPEED_FIELDS) { curspeed = le16_to_cpup((__le16 *)&buf[8 + 14]); maxspeed = le16_to_cpup((__le16 *)&buf[8 + 8]); } else { curspeed = be16_to_cpup((__be16 *)&buf[8 + 14]); maxspeed = be16_to_cpup((__be16 *)&buf[8 + 8]); } ide_debug_log(IDE_DBG_PROBE, "curspeed: %u, maxspeed: %u", curspeed, maxspeed); cd->current_speed = DIV_ROUND_CLOSEST(curspeed, 176); cd->max_speed = DIV_ROUND_CLOSEST(maxspeed, 176); } #define IDE_CD_CAPABILITIES \ (CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | \ CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | \ CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R | \ CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET | \ CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM) static struct cdrom_device_ops ide_cdrom_dops = { .open = ide_cdrom_open_real, .release = ide_cdrom_release_real, .drive_status = ide_cdrom_drive_status, .check_events = ide_cdrom_check_events_real, .tray_move = ide_cdrom_tray_move, .lock_door = ide_cdrom_lock_door, .select_speed = ide_cdrom_select_speed, .get_last_session = ide_cdrom_get_last_session, .get_mcn = ide_cdrom_get_mcn, .reset = ide_cdrom_reset, .audio_ioctl = ide_cdrom_audio_ioctl, .capability = IDE_CD_CAPABILITIES, .generic_packet = ide_cdrom_packet, }; static int ide_cdrom_register(ide_drive_t *drive, int nslots) { struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *devinfo = &info->devinfo; ide_debug_log(IDE_DBG_PROBE, "nslots: %d", nslots); devinfo->ops = &ide_cdrom_dops; devinfo->speed = info->current_speed; devinfo->capacity = nslots; devinfo->handle = drive; strcpy(devinfo->name, drive->name); if (drive->atapi_flags & IDE_AFLAG_NO_SPEED_SELECT) devinfo->mask |= CDC_SELECT_SPEED; devinfo->disk = info->disk; return register_cdrom(devinfo); } static int ide_cdrom_probe_capabilities(ide_drive_t *drive) { struct cdrom_info *cd = drive->driver_data; struct cdrom_device_info *cdi = &cd->devinfo; u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE]; mechtype_t mechtype; int nslots = 1; ide_debug_log(IDE_DBG_PROBE, "media: 0x%x, atapi_flags: 0x%lx", drive->media, drive->atapi_flags); cdi->mask = (CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_SELECT_DISC | CDC_PLAY_AUDIO | 
CDC_MO_DRIVE | CDC_RAM); if (drive->media == ide_optical) { cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM); printk(KERN_ERR PFX "%s: ATAPI magneto-optical drive\n", drive->name); return nslots; } if (drive->atapi_flags & IDE_AFLAG_PRE_ATAPI12) { drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT; cdi->mask &= ~CDC_PLAY_AUDIO; return nslots; } /* * We have to cheat a little here. the packet will eventually be queued * with ide_cdrom_packet(), which extracts the drive from cdi->handle. * Since this device hasn't been registered with the Uniform layer yet, * it can't do this. Same goes for cdi->ops. */ cdi->handle = drive; cdi->ops = &ide_cdrom_dops; if (ide_cdrom_get_capabilities(drive, buf)) return 0; if ((buf[8 + 6] & 0x01) == 0) drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING; if (buf[8 + 6] & 0x08) drive->atapi_flags &= ~IDE_AFLAG_NO_EJECT; if (buf[8 + 3] & 0x01) cdi->mask &= ~CDC_CD_R; if (buf[8 + 3] & 0x02) cdi->mask &= ~(CDC_CD_RW | CDC_RAM); if (buf[8 + 2] & 0x38) cdi->mask &= ~CDC_DVD; if (buf[8 + 3] & 0x20) cdi->mask &= ~(CDC_DVD_RAM | CDC_RAM); if (buf[8 + 3] & 0x10) cdi->mask &= ~CDC_DVD_R; if ((buf[8 + 4] & 0x01) || (drive->atapi_flags & IDE_AFLAG_PLAY_AUDIO_OK)) cdi->mask &= ~CDC_PLAY_AUDIO; mechtype = buf[8 + 6] >> 5; if (mechtype == mechtype_caddy || mechtype == mechtype_popup || (drive->atapi_flags & IDE_AFLAG_NO_AUTOCLOSE)) cdi->mask |= CDC_CLOSE_TRAY; if (cdi->sanyo_slot > 0) { cdi->mask &= ~CDC_SELECT_DISC; nslots = 3; } else if (mechtype == mechtype_individual_changer || mechtype == mechtype_cartridge_changer) { nslots = cdrom_number_of_slots(cdi); if (nslots > 1) cdi->mask &= ~CDC_SELECT_DISC; } ide_cdrom_update_speed(drive, buf); printk(KERN_INFO PFX "%s: ATAPI", drive->name); /* don't print speed if the drive reported 0 */ if (cd->max_speed) printk(KERN_CONT " %dX", cd->max_speed); printk(KERN_CONT " %s", (cdi->mask & CDC_DVD) ? "CD-ROM" : "DVD-ROM"); if ((cdi->mask & CDC_DVD_R) == 0 || (cdi->mask & CDC_DVD_RAM) == 0) printk(KERN_CONT " DVD%s%s", (cdi->mask & CDC_DVD_R) ? "" : "-R", (cdi->mask & CDC_DVD_RAM) ? "" : "/RAM"); if ((cdi->mask & CDC_CD_R) == 0 || (cdi->mask & CDC_CD_RW) == 0) printk(KERN_CONT " CD%s%s", (cdi->mask & CDC_CD_R) ? "" : "-R", (cdi->mask & CDC_CD_RW) ? "" : "/RW"); if ((cdi->mask & CDC_SELECT_DISC) == 0) printk(KERN_CONT " changer w/%d slots", nslots); else printk(KERN_CONT " drive"); printk(KERN_CONT ", %dkB Cache\n", be16_to_cpup((__be16 *)&buf[8 + 12])); return nslots; } /* standard prep_rq_fn that builds 10 byte cmds */ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) { int hard_sect = queue_logical_block_size(q); long block = (long)blk_rq_pos(rq) / (hard_sect >> 9); unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); memset(rq->cmd, 0, BLK_MAX_CDB); if (rq_data_dir(rq) == READ) rq->cmd[0] = GPCMD_READ_10; else rq->cmd[0] = GPCMD_WRITE_10; /* * fill in lba */ rq->cmd[2] = (block >> 24) & 0xff; rq->cmd[3] = (block >> 16) & 0xff; rq->cmd[4] = (block >> 8) & 0xff; rq->cmd[5] = block & 0xff; /* * and transfer length */ rq->cmd[7] = (blocks >> 8) & 0xff; rq->cmd[8] = blocks & 0xff; rq->cmd_len = 10; return BLKPREP_OK; } /* * Most of the SCSI commands are supported directly by ATAPI devices. * This transform handles the few exceptions. 
*/ static int ide_cdrom_prep_pc(struct request *rq) { u8 *c = rq->cmd; /* transform 6-byte read/write commands to the 10-byte version */ if (c[0] == READ_6 || c[0] == WRITE_6) { c[8] = c[4]; c[5] = c[3]; c[4] = c[2]; c[3] = c[1] & 0x1f; c[2] = 0; c[1] &= 0xe0; c[0] += (READ_10 - READ_6); rq->cmd_len = 10; return BLKPREP_OK; } /* * it's silly to pretend we understand 6-byte sense commands, just * reject with ILLEGAL_REQUEST and the caller should take the * appropriate action */ if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) { rq->errors = ILLEGAL_REQUEST; return BLKPREP_KILL; } return BLKPREP_OK; } static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) { if (rq->cmd_type == REQ_TYPE_FS) return ide_cdrom_prep_fs(q, rq); else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) return ide_cdrom_prep_pc(rq); return 0; } struct cd_list_entry { const char *id_model; const char *id_firmware; unsigned int cd_flags; }; #ifdef CONFIG_IDE_PROC_FS static sector_t ide_cdrom_capacity(ide_drive_t *drive) { unsigned long capacity, sectors_per_frame; if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL)) return 0; return capacity * sectors_per_frame; } static int idecd_capacity_proc_show(struct seq_file *m, void *v) { ide_drive_t *drive = m->private; seq_printf(m, "%llu\n", (long long)ide_cdrom_capacity(drive)); return 0; } static int idecd_capacity_proc_open(struct inode *inode, struct file *file) { return single_open(file, idecd_capacity_proc_show, PDE(inode)->data); } static const struct file_operations idecd_capacity_proc_fops = { .owner = THIS_MODULE, .open = idecd_capacity_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static ide_proc_entry_t idecd_proc[] = { { "capacity", S_IFREG|S_IRUGO, &idecd_capacity_proc_fops }, {} }; static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive) { return idecd_proc; } static const struct ide_proc_devset *ide_cd_proc_devsets(ide_drive_t *drive) { return NULL; } #endif static const struct cd_list_entry ide_cd_quirks_list[] = { /* SCR-3231 doesn't support the SET_CD_SPEED command. */ { "SAMSUNG CD-ROM SCR-3231", NULL, IDE_AFLAG_NO_SPEED_SELECT }, /* Old NEC260 (not R) was released before ATAPI 1.2 spec. */ { "NEC CD-ROM DRIVE:260", "1.01", IDE_AFLAG_TOCADDR_AS_BCD | IDE_AFLAG_PRE_ATAPI12, }, /* Vertos 300, some versions of this drive like to talk BCD. */ { "V003S0DS", NULL, IDE_AFLAG_VERTOS_300_SSD, }, /* Vertos 600 ESD. */ { "V006E0DS", NULL, IDE_AFLAG_VERTOS_600_ESD, }, /* * Sanyo 3 CD changer uses a non-standard command for CD changing * (by default standard ATAPI support for CD changers is used). */ { "CD-ROM CDR-C3 G", NULL, IDE_AFLAG_SANYO_3CD }, { "CD-ROM CDR-C3G", NULL, IDE_AFLAG_SANYO_3CD }, { "CD-ROM CDR_C36", NULL, IDE_AFLAG_SANYO_3CD }, /* Stingray 8X CD-ROM. */ { "STINGRAY 8422 IDE 8X CD-ROM 7-27-95", NULL, IDE_AFLAG_PRE_ATAPI12 }, /* * ACER 50X CD-ROM and WPI 32X CD-ROM require the full spec length * mode sense page capabilities size, but older drives break. */ { "ATAPI CD ROM DRIVE 50X MAX", NULL, IDE_AFLAG_FULL_CAPS_PAGE }, { "WPI CDS-32X", NULL, IDE_AFLAG_FULL_CAPS_PAGE }, /* ACER/AOpen 24X CD-ROM has the speed fields byte-swapped. */ { "", "241N", IDE_AFLAG_LE_SPEED_FIELDS }, /* * Some drives used by Apple don't advertise audio play * but they do support reading TOC & audio datas. 
*/ { "MATSHITADVD-ROM SR-8187", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "MATSHITADVD-ROM SR-8186", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "MATSHITADVD-ROM SR-8176", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "MATSHITADVD-ROM SR-8174", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "Optiarc DVD RW AD-5200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "Optiarc DVD RW AD-7200A", NULL, IDE_AFLAG_PLAY_AUDIO_OK }, { "Optiarc DVD RW AD-7543A", NULL, IDE_AFLAG_NO_AUTOCLOSE }, { "TEAC CD-ROM CD-224E", NULL, IDE_AFLAG_NO_AUTOCLOSE }, { NULL, NULL, 0 } }; static unsigned int ide_cd_flags(u16 *id) { const struct cd_list_entry *cle = ide_cd_quirks_list; while (cle->id_model) { if (strcmp(cle->id_model, (char *)&id[ATA_ID_PROD]) == 0 && (cle->id_firmware == NULL || strstr((char *)&id[ATA_ID_FW_REV], cle->id_firmware))) return cle->cd_flags; cle++; } return 0; } static int ide_cdrom_setup(ide_drive_t *drive) { struct cdrom_info *cd = drive->driver_data; struct cdrom_device_info *cdi = &cd->devinfo; struct request_queue *q = drive->queue; u16 *id = drive->id; char *fw_rev = (char *)&id[ATA_ID_FW_REV]; int nslots; ide_debug_log(IDE_DBG_PROBE, "enter"); blk_queue_prep_rq(q, ide_cdrom_prep_fn); blk_queue_dma_alignment(q, 31); blk_queue_update_dma_pad(q, 15); drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED; drive->atapi_flags = IDE_AFLAG_NO_EJECT | ide_cd_flags(id); if ((drive->atapi_flags & IDE_AFLAG_VERTOS_300_SSD) && fw_rev[4] == '1' && fw_rev[6] <= '2') drive->atapi_flags |= (IDE_AFLAG_TOCTRACKS_AS_BCD | IDE_AFLAG_TOCADDR_AS_BCD); else if ((drive->atapi_flags & IDE_AFLAG_VERTOS_600_ESD) && fw_rev[4] == '1' && fw_rev[6] <= '2') drive->atapi_flags |= IDE_AFLAG_TOCTRACKS_AS_BCD; else if (drive->atapi_flags & IDE_AFLAG_SANYO_3CD) /* 3 => use CD in slot 0 */ cdi->sanyo_slot = 3; nslots = ide_cdrom_probe_capabilities(drive); blk_queue_logical_block_size(q, CD_FRAMESIZE); if (ide_cdrom_register(drive, nslots)) { printk(KERN_ERR PFX "%s: %s failed to register device with the" " cdrom driver.\n", drive->name, __func__); cd->devinfo.handle = NULL; return 1; } ide_proc_register_driver(drive, cd->driver); return 0; } static void ide_cd_remove(ide_drive_t *drive) { struct cdrom_info *info = drive->driver_data; ide_debug_log(IDE_DBG_FUNC, "enter"); ide_proc_unregister_driver(drive, info->driver); device_del(&info->dev); del_gendisk(info->disk); mutex_lock(&idecd_ref_mutex); put_device(&info->dev); mutex_unlock(&idecd_ref_mutex); } static void ide_cd_release(struct device *dev) { struct cdrom_info *info = to_ide_drv(dev, cdrom_info); struct cdrom_device_info *devinfo = &info->devinfo; ide_drive_t *drive = info->drive; struct gendisk *g = info->disk; ide_debug_log(IDE_DBG_FUNC, "enter"); kfree(info->toc); if (devinfo->handle == drive) unregister_cdrom(devinfo); drive->driver_data = NULL; blk_queue_prep_rq(drive->queue, NULL); g->private_data = NULL; put_disk(g); kfree(info); } static int ide_cd_probe(ide_drive_t *); static struct ide_driver ide_cdrom_driver = { .gen_driver = { .owner = THIS_MODULE, .name = "ide-cdrom", .bus = &ide_bus_type, }, .probe = ide_cd_probe, .remove = ide_cd_remove, .version = IDECD_VERSION, .do_request = ide_cd_do_request, #ifdef CONFIG_IDE_PROC_FS .proc_entries = ide_cd_proc_entries, .proc_devsets = ide_cd_proc_devsets, #endif }; static int idecd_open(struct block_device *bdev, fmode_t mode) { struct cdrom_info *info; int rc = -ENXIO; mutex_lock(&ide_cd_mutex); info = ide_cd_get(bdev->bd_disk); if (!info) goto out; rc = cdrom_open(&info->devinfo, bdev, mode); if (rc < 0) ide_cd_put(info); out: mutex_unlock(&ide_cd_mutex); return rc; } 
static int idecd_release(struct gendisk *disk, fmode_t mode) { struct cdrom_info *info = ide_drv_g(disk, cdrom_info); mutex_lock(&ide_cd_mutex); cdrom_release(&info->devinfo, mode); ide_cd_put(info); mutex_unlock(&ide_cd_mutex); return 0; } static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg) { struct packet_command cgc; char buffer[16]; int stat; char spindown; if (copy_from_user(&spindown, (void __user *)arg, sizeof(char))) return -EFAULT; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0); if (stat) return stat; buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f); return cdrom_mode_select(cdi, &cgc); } static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg) { struct packet_command cgc; char buffer[16]; int stat; char spindown; init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN); stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0); if (stat) return stat; spindown = buffer[11] & 0x0f; if (copy_to_user((void __user *)arg, &spindown, sizeof(char))) return -EFAULT; return 0; } static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info); int err; switch (cmd) { case CDROMSETSPINDOWN: return idecd_set_spindown(&info->devinfo, arg); case CDROMGETSPINDOWN: return idecd_get_spindown(&info->devinfo, arg); default: break; } err = generic_ide_ioctl(info->drive, bdev, cmd, arg); if (err == -EINVAL) err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd, arg); return err; } static int idecd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&ide_cd_mutex); ret = idecd_locked_ioctl(bdev, mode, cmd, arg); mutex_unlock(&ide_cd_mutex); return ret; } static unsigned int idecd_check_events(struct gendisk *disk, unsigned int clearing) { struct cdrom_info *info = ide_drv_g(disk, cdrom_info); return cdrom_check_events(&info->devinfo, clearing); } static int idecd_revalidate_disk(struct gendisk *disk) { struct cdrom_info *info = ide_drv_g(disk, cdrom_info); struct request_sense sense; ide_cd_read_toc(info->drive, &sense); return 0; } static const struct block_device_operations idecd_ops = { .owner = THIS_MODULE, .open = idecd_open, .release = idecd_release, .ioctl = idecd_ioctl, .check_events = idecd_check_events, .revalidate_disk = idecd_revalidate_disk }; /* module options */ static unsigned long debug_mask; module_param(debug_mask, ulong, 0644); MODULE_DESCRIPTION("ATAPI CD-ROM Driver"); static int ide_cd_probe(ide_drive_t *drive) { struct cdrom_info *info; struct gendisk *g; struct request_sense sense; ide_debug_log(IDE_DBG_PROBE, "driver_req: %s, media: 0x%x", drive->driver_req, drive->media); if (!strstr("ide-cdrom", drive->driver_req)) goto failed; if (drive->media != ide_cdrom && drive->media != ide_optical) goto failed; drive->debug_mask = debug_mask; drive->irq_handler = cdrom_newpc_intr; info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL); if (info == NULL) { printk(KERN_ERR PFX "%s: Can't allocate a cdrom structure\n", drive->name); goto failed; } g = alloc_disk(1 << PARTN_BITS); if (!g) goto out_free_cd; ide_init_disk(g, drive); info->dev.parent = &drive->gendev; info->dev.release = ide_cd_release; dev_set_name(&info->dev, dev_name(&drive->gendev)); if (device_register(&info->dev)) goto out_free_disk; info->drive = drive; info->driver = &ide_cdrom_driver; info->disk = g; g->private_data = 
&info->driver; drive->driver_data = info; g->minors = 1; g->driverfs_dev = &drive->gendev; g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE; if (ide_cdrom_setup(drive)) { put_device(&info->dev); goto failed; } ide_cd_read_toc(drive, &sense); g->fops = &idecd_ops; g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; add_disk(g); return 0; out_free_disk: put_disk(g); out_free_cd: kfree(info); failed: return -ENODEV; } static void __exit ide_cdrom_exit(void) { driver_unregister(&ide_cdrom_driver.gen_driver); } static int __init ide_cdrom_init(void) { printk(KERN_INFO DRV_NAME " driver " IDECD_VERSION "\n"); return driver_register(&ide_cdrom_driver.gen_driver); } MODULE_ALIAS("ide:*m-cdrom*"); MODULE_ALIAS("ide-cd"); module_init(ide_cdrom_init); module_exit(ide_cdrom_exit); MODULE_LICENSE("GPL");
gpl-2.0
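ide_cdrom_prep_pc in the record above rewrites 6-byte READ/WRITE CDBs into their 10-byte equivalents, since ATAPI devices only accept the 10-byte forms. Below is a hedged standalone restatement of that field shuffle for reference; the opcode constants are the standard SCSI values, and the function name cdb6_to_cdb10 is invented for this sketch and is not part of the driver.

/*
 * Illustrative sketch only: the 6-byte -> 10-byte READ/WRITE CDB
 * rewrite performed by ide_cdrom_prep_pc() above, as a standalone
 * userspace function.
 */
#include <stdint.h>
#include <stdio.h>

#define READ_6   0x08
#define WRITE_6  0x0a
#define READ_10  0x28

/*
 * Rewrites a 6-byte READ/WRITE CDB in place into its 10-byte form.
 * Returns the new CDB length, or 6 if the opcode was left untouched.
 * Bytes 6..9 of the buffer are assumed to start out zeroed.
 */
static int cdb6_to_cdb10(uint8_t c[10])
{
    if (c[0] != READ_6 && c[0] != WRITE_6)
        return 6;
    c[8] = c[4];              /* transfer length */
    c[5] = c[3];              /* LBA low byte */
    c[4] = c[2];              /* LBA mid byte */
    c[3] = c[1] & 0x1f;       /* LBA high bits live in byte 1 of a CDB6 */
    c[2] = 0;
    c[1] &= 0xe0;             /* keep only the LUN bits */
    c[0] += (READ_10 - READ_6);  /* 0x08 -> 0x28, 0x0a -> 0x2a */
    return 10;
}

int main(void)
{
    uint8_t c[10] = { READ_6, 0x01, 0x23, 0x45, 0x08, 0, 0, 0, 0, 0 };
    int len = cdb6_to_cdb10(c);

    printf("len=%d opcode=0x%02x lba=%02x%02x%02x%02x blocks=%u\n",
           len, c[0], c[2], c[3], c[4], c[5], c[8]);
    return 0;
}

Because READ_10 - READ_6 equals WRITE_10 - WRITE_6 (0x20), the single opcode adjustment covers both the read and the write case, exactly as in the driver.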
InfinitiveOS-Devices/android_kernel_motorola_msm8610
net/sunrpc/socklib.c
7674
4622
/* * linux/net/sunrpc/socklib.c * * Common socket helper routines for RPC client and server * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/compiler.h> #include <linux/netdevice.h> #include <linux/gfp.h> #include <linux/skbuff.h> #include <linux/types.h> #include <linux/pagemap.h> #include <linux/udp.h> #include <linux/sunrpc/xdr.h> #include <linux/export.h> /** * xdr_skb_read_bits - copy some data bits from skb to internal buffer * @desc: sk_buff copy helper * @to: copy destination * @len: number of bytes to copy * * Possibly called several times to iterate over an sk_buff and copy * data out of it. */ size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len) { if (len > desc->count) len = desc->count; if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len))) return 0; desc->count -= len; desc->offset += len; return len; } EXPORT_SYMBOL_GPL(xdr_skb_read_bits); /** * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer * @desc: sk_buff copy helper * @to: copy destination * @len: number of bytes to copy * * Same as skb_read_bits, but calculate a checksum at the same time. */ static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len) { unsigned int pos; __wsum csum2; if (len > desc->count) len = desc->count; pos = desc->offset; csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); desc->csum = csum_block_add(desc->csum, csum2, pos); desc->count -= len; desc->offset += len; return len; } /** * xdr_partial_copy_from_skb - copy data out of an skb * @xdr: target XDR buffer * @base: starting offset * @desc: sk_buff copy helper * @copy_actor: virtual method for copying data * */ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor) { struct page **ppage = xdr->pages; unsigned int len, pglen = xdr->page_len; ssize_t copied = 0; size_t ret; len = xdr->head[0].iov_len; if (base < len) { len -= base; ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); copied += ret; if (ret != len || !desc->count) goto out; base = 0; } else base -= len; if (unlikely(pglen == 0)) goto copy_tail; if (unlikely(base >= pglen)) { base -= pglen; goto copy_tail; } if (base || xdr->page_base) { pglen -= base; base += xdr->page_base; ppage += base >> PAGE_CACHE_SHIFT; base &= ~PAGE_CACHE_MASK; } do { char *kaddr; /* ACL likes to be lazy in allocating pages - ACLs * are small by default but can get huge. */ if (unlikely(*ppage == NULL)) { *ppage = alloc_page(GFP_ATOMIC); if (unlikely(*ppage == NULL)) { if (copied == 0) copied = -ENOMEM; goto out; } } len = PAGE_CACHE_SIZE; kaddr = kmap_atomic(*ppage); if (base) { len -= base; if (pglen < len) len = pglen; ret = copy_actor(desc, kaddr + base, len); base = 0; } else { if (pglen < len) len = pglen; ret = copy_actor(desc, kaddr, len); } flush_dcache_page(*ppage); kunmap_atomic(kaddr); copied += ret; if (ret != len || !desc->count) goto out; ppage++; } while ((pglen -= len) != 0); copy_tail: len = xdr->tail[0].iov_len; if (base < len) copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); out: return copied; } EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb); /** * csum_partial_copy_to_xdr - checksum and copy data * @xdr: target XDR buffer * @skb: source skb * * We have set things up such that we perform the checksum of the UDP * packet in parallel with the copies into the RPC client iovec. 
-DaveM */ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) { struct xdr_skb_reader desc; desc.skb = skb; desc.offset = sizeof(struct udphdr); desc.count = skb->len - desc.offset; if (skb_csum_unnecessary(skb)) goto no_checksum; desc.csum = csum_partial(skb->data, desc.offset, skb->csum); if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0) return -1; if (desc.offset != skb->len) { __wsum csum2; csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); desc.csum = csum_block_add(desc.csum, csum2, desc.offset); } if (desc.count) return -1; if (csum_fold(desc.csum)) return -1; if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) netdev_rx_csum_fault(skb->dev); return 0; no_checksum: if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0) return -1; if (desc.count) return -1; return 0; } EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
gpl-2.0
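csum_partial_copy_to_xdr in the record above folds UDP checksum verification into the same pass that copies skb data into the RPC iovec, via csum_partial/csum_block_add/csum_fold. The snippet below is a hedged userspace analogue of that idea using plain ones'-complement arithmetic; it does not use the kernel csum_* helpers, and every name in it is invented for the example.

/*
 * Illustrative sketch only: copy a payload and accumulate its 16-bit
 * ones'-complement Internet checksum in the same loop, then fold the
 * accumulator once at the end -- the same single-pass idea used by
 * csum_partial_copy_to_xdr() above, without the kernel helpers.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Copy 'len' bytes from src to dst while summing 16-bit words. */
static uint32_t copy_and_ocsum(uint8_t *dst, const uint8_t *src,
                               size_t len, uint32_t sum)
{
    size_t i;

    for (i = 0; i + 1 < len; i += 2) {
        dst[i] = src[i];
        dst[i + 1] = src[i + 1];
        sum += ((uint32_t)src[i] << 8) | src[i + 1];
    }
    if (i < len) {                /* odd trailing byte, padded with 0 */
        dst[i] = src[i];
        sum += (uint32_t)src[i] << 8;
    }
    return sum;
}

/* Fold the 32-bit accumulator down to 16 bits and complement it. */
static uint16_t ocsum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    const uint8_t payload[] = "some datagram payload";
    uint8_t copy[sizeof(payload)];
    uint32_t sum = copy_and_ocsum(copy, payload, sizeof(payload), 0);

    printf("folded checksum: 0x%04x\n", ocsum_fold(sum));
    return 0;
}

The benefit, as in the kernel code, is that the receive path never has to walk the payload twice: the checksum falls out of the copy, and only a cheap fold-and-compare remains at the end.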
axxx007xxxz/cm_kernel_motorola_msm8916
net/sunrpc/socklib.c
7674
4622
/*
 * linux/net/sunrpc/socklib.c
 *
 * Common socket helper routines for RPC client and server
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/xdr.h>
#include <linux/export.h>

/**
 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Possibly called several times to iterate over an sk_buff and copy
 * data out of it.
 */
size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	if (len > desc->count)
		len = desc->count;
	if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
		return 0;
	desc->count -= len;
	desc->offset += len;
	return len;
}
EXPORT_SYMBOL_GPL(xdr_skb_read_bits);

/**
 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
 * @desc: sk_buff copy helper
 * @to: copy destination
 * @len: number of bytes to copy
 *
 * Same as skb_read_bits, but calculate a checksum at the same time.
 */
static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
	unsigned int pos;
	__wsum csum2;

	if (len > desc->count)
		len = desc->count;
	pos = desc->offset;
	csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0);
	desc->csum = csum_block_add(desc->csum, csum2, pos);
	desc->count -= len;
	desc->offset += len;
	return len;
}

/**
 * xdr_partial_copy_from_skb - copy data out of an skb
 * @xdr: target XDR buffer
 * @base: starting offset
 * @desc: sk_buff copy helper
 * @copy_actor: virtual method for copying data
 *
 */
ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
		struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	ssize_t		copied = 0;
	size_t		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (unlikely(pglen == 0))
		goto copy_tail;
	if (unlikely(base >= pglen)) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);

copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}
EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb);

/**
 * csum_partial_copy_to_xdr - checksum and copy data
 * @xdr: target XDR buffer
 * @skb: source skb
 *
 * We have set things up such that we perform the checksum of the UDP
 * packet in parallel with the copies into the RPC client iovec. -DaveM
 */
int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
{
	struct xdr_skb_reader	desc;

	desc.skb = skb;
	desc.offset = sizeof(struct udphdr);
	desc.count = skb->len - desc.offset;

	if (skb_csum_unnecessary(skb))
		goto no_checksum;

	desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
		return -1;
	if (desc.offset != skb->len) {
		__wsum csum2;
		csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0);
		desc.csum = csum_block_add(desc.csum, csum2, desc.offset);
	}
	if (desc.count)
		return -1;
	if (csum_fold(desc.csum))
		return -1;
	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
		netdev_rx_csum_fault(skb->dev);
	return 0;
no_checksum:
	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
		return -1;
	if (desc.count)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(csum_partial_copy_to_xdr);
gpl-2.0
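csum_partial_copy_to_xdr() above relies on one property of the Internet (one's-complement) checksum: partial sums over disjoint byte ranges can be computed independently and then combined, as long as the byte offset of each range is accounted for, which is what csum_block_add() and the final csum_fold() test express. The sketch below is a plain userspace illustration of that arithmetic, not the kernel's csum_*() implementation; the helper names ones_sum() and block_add() and the packet bytes are invented for this example.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sum a buffer as big-endian 16-bit words, one's-complement style, folding carries. */
static uint32_t ones_sum(const uint8_t *buf, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)				/* odd trailing byte is padded with zero */
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)			/* end-around carry */
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/*
 * Combine a partial sum over a block starting 'offset' bytes into the datagram.
 * An odd offset swaps the byte lanes, which is the adjustment csum_block_add()
 * performs in the kernel; at an even offset this is plain one's-complement addition.
 */
static uint32_t block_add(uint32_t sum, uint32_t part, size_t offset)
{
	if (offset & 1)
		part = ((part & 0xff) << 8) | (part >> 8);
	sum += part;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t pkt[11] = { 0x45, 0x00, 0x00, 0x54, 0xbe, 0xef,
			    0x40, 0x00, 0x40, 0x11, 0x7c };
	uint32_t whole = ones_sum(pkt, sizeof(pkt), 0);
	/* Split at an odd boundary to show the lane swap in block_add(). */
	uint32_t head = ones_sum(pkt, 3, 0);
	uint32_t tail = ones_sum(pkt + 3, sizeof(pkt) - 3, 0);
	uint32_t combined = block_add(head, tail, 3);

	assert(whole == combined);
	printf("whole=0x%04x combined=0x%04x\n", whole, combined);
	return 0;
}

A receiver then checks that the folded total complements to zero over the full datagram, which is the role of the csum_fold(desc.csum) test in the routine above (at the even sizeof(struct udphdr) offset used there, the lane swap never triggers).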
CyanideL/android_kernel_samsung_smdk4412
crypto/khazad.c
9978
53043
/* * Cryptographic API. * * Khazad Algorithm * * The Khazad algorithm was developed by Paulo S. L. M. Barreto and * Vincent Rijmen. It was a finalist in the NESSIE encryption contest. * * The original authors have disclaimed all copyright interest in this * code and thus put it in the public domain. The subsequent authors * have put this under the GNU General Public License. * * By Aaron Grothe ajgrothe@yahoo.com, August 1, 2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/crypto.h> #include <linux/types.h> #define KHAZAD_KEY_SIZE 16 #define KHAZAD_BLOCK_SIZE 8 #define KHAZAD_ROUNDS 8 struct khazad_ctx { u64 E[KHAZAD_ROUNDS + 1]; u64 D[KHAZAD_ROUNDS + 1]; }; static const u64 T0[256] = { 0xbad3d268bbb96a01ULL, 0x54fc4d19e59a66b1ULL, 0x2f71bc93e26514cdULL, 0x749ccdb925871b51ULL, 0x53f55102f7a257a4ULL, 0xd3686bb8d0d6be03ULL, 0xd26b6fbdd6deb504ULL, 0x4dd72964b35285feULL, 0x50f05d0dfdba4aadULL, 0xace98a26cf09e063ULL, 0x8d8a0e83091c9684ULL, 0xbfdcc679a5914d1aULL, 0x7090ddad3da7374dULL, 0x52f65507f1aa5ca3ULL, 0x9ab352c87ba417e1ULL, 0x4cd42d61b55a8ef9ULL, 0xea238f65460320acULL, 0xd56273a6c4e68411ULL, 0x97a466f155cc68c2ULL, 0xd16e63b2dcc6a80dULL, 0x3355ccffaa85d099ULL, 0x51f35908fbb241aaULL, 0x5bed712ac7e20f9cULL, 0xa6f7a204f359ae55ULL, 0xde7f5f81febec120ULL, 0x48d83d75ad7aa2e5ULL, 0xa8e59a32d729cc7fULL, 0x99b65ec771bc0ae8ULL, 0xdb704b90e096e63bULL, 0x3256c8faac8ddb9eULL, 0xb7c4e65195d11522ULL, 0xfc19d72b32b3aaceULL, 0xe338ab48704b7393ULL, 0x9ebf42dc63843bfdULL, 0x91ae7eef41fc52d0ULL, 0x9bb056cd7dac1ce6ULL, 0xe23baf4d76437894ULL, 0xbbd0d66dbdb16106ULL, 0x41c319589b32f1daULL, 0x6eb2a5cb7957e517ULL, 0xa5f2ae0bf941b35cULL, 0xcb400bc08016564bULL, 0x6bbdb1da677fc20cULL, 0x95a26efb59dc7eccULL, 0xa1febe1fe1619f40ULL, 0xf308eb1810cbc3e3ULL, 0xb1cefe4f81e12f30ULL, 0x0206080a0c10160eULL, 0xcc4917db922e675eULL, 0xc45137f3a26e3f66ULL, 0x1d2774694ee8cf53ULL, 0x143c504478a09c6cULL, 0xc3582be8b0560e73ULL, 0x63a591f2573f9a34ULL, 0xda734f95e69eed3cULL, 0x5de76934d3d2358eULL, 0x5fe1613edfc22380ULL, 0xdc79578bf2aed72eULL, 0x7d87e99413cf486eULL, 0xcd4a13de94266c59ULL, 0x7f81e19e1fdf5e60ULL, 0x5aee752fc1ea049bULL, 0x6cb4adc17547f319ULL, 0x5ce46d31d5da3e89ULL, 0xf704fb0c08ebefffULL, 0x266a98bed42d47f2ULL, 0xff1cdb2438abb7c7ULL, 0xed2a937e543b11b9ULL, 0xe825876f4a1336a2ULL, 0x9dba4ed3699c26f4ULL, 0x6fb1a1ce7f5fee10ULL, 0x8e8f028c03048b8dULL, 0x192b647d56c8e34fULL, 0xa0fdba1ae7699447ULL, 0xf00de7171ad3deeaULL, 0x89861e97113cba98ULL, 0x0f113c332278692dULL, 0x07091c1b12383115ULL, 0xafec8629c511fd6aULL, 0xfb10cb30208b9bdbULL, 0x0818202830405838ULL, 0x153f54417ea8976bULL, 0x0d1734392e687f23ULL, 0x040c101418202c1cULL, 0x0103040506080b07ULL, 0x64ac8de94507ab21ULL, 0xdf7c5b84f8b6ca27ULL, 0x769ac5b329970d5fULL, 0x798bf9800bef6472ULL, 0xdd7a538ef4a6dc29ULL, 0x3d47f4c98ef5b2b3ULL, 0x163a584e74b08a62ULL, 0x3f41fcc382e5a4bdULL, 0x3759dcebb2a5fc85ULL, 0x6db7a9c4734ff81eULL, 0x3848e0d890dd95a8ULL, 0xb9d6de67b1a17708ULL, 0x7395d1a237bf2a44ULL, 0xe926836a4c1b3da5ULL, 0x355fd4e1beb5ea8bULL, 0x55ff491ce3926db6ULL, 0x7193d9a83baf3c4aULL, 0x7b8df18a07ff727cULL, 0x8c890a860f149d83ULL, 0x7296d5a731b72143ULL, 0x88851a921734b19fULL, 0xf607ff090ee3e4f8ULL, 0x2a7ea882fc4d33d6ULL, 0x3e42f8c684edafbaULL, 0x5ee2653bd9ca2887ULL, 
0x27699cbbd2254cf5ULL, 0x46ca0543890ac0cfULL, 0x0c14303c28607424ULL, 0x65af89ec430fa026ULL, 0x68b8bdd56d67df05ULL, 0x61a399f85b2f8c3aULL, 0x03050c0f0a181d09ULL, 0xc15e23e2bc46187dULL, 0x57f94116ef827bb8ULL, 0xd6677fa9cefe9918ULL, 0xd976439aec86f035ULL, 0x58e87d25cdfa1295ULL, 0xd875479fea8efb32ULL, 0x66aa85e34917bd2fULL, 0xd7647bacc8f6921fULL, 0x3a4ee8d29ccd83a6ULL, 0xc84507cf8a0e4b42ULL, 0x3c44f0cc88fdb9b4ULL, 0xfa13cf35268390dcULL, 0x96a762f453c463c5ULL, 0xa7f4a601f551a552ULL, 0x98b55ac277b401efULL, 0xec29977b52331abeULL, 0xb8d5da62b7a97c0fULL, 0xc7543bfca876226fULL, 0xaeef822cc319f66dULL, 0x69bbb9d06b6fd402ULL, 0x4bdd317aa762bfecULL, 0xabe0963ddd31d176ULL, 0xa9e69e37d121c778ULL, 0x67a981e64f1fb628ULL, 0x0a1e28223c504e36ULL, 0x47c901468f02cbc8ULL, 0xf20bef1d16c3c8e4ULL, 0xb5c2ee5b99c1032cULL, 0x226688aacc0d6beeULL, 0xe532b356647b4981ULL, 0xee2f9f715e230cb0ULL, 0xbedfc27ca399461dULL, 0x2b7dac87fa4538d1ULL, 0x819e3ebf217ce2a0ULL, 0x1236485a6c90a67eULL, 0x839836b52d6cf4aeULL, 0x1b2d6c775ad8f541ULL, 0x0e1238362470622aULL, 0x23658cafca0560e9ULL, 0xf502f30604fbf9f1ULL, 0x45cf094c8312ddc6ULL, 0x216384a5c61576e7ULL, 0xce4f1fd19e3e7150ULL, 0x49db3970ab72a9e2ULL, 0x2c74b09ce87d09c4ULL, 0xf916c33a2c9b8dd5ULL, 0xe637bf596e635488ULL, 0xb6c7e25493d91e25ULL, 0x2878a088f05d25d8ULL, 0x17395c4b72b88165ULL, 0x829b32b02b64ffa9ULL, 0x1a2e68725cd0fe46ULL, 0x8b80169d1d2cac96ULL, 0xfe1fdf213ea3bcc0ULL, 0x8a8312981b24a791ULL, 0x091b242d3648533fULL, 0xc94603ca8c064045ULL, 0x879426a1354cd8b2ULL, 0x4ed2256bb94a98f7ULL, 0xe13ea3427c5b659dULL, 0x2e72b896e46d1fcaULL, 0xe431b75362734286ULL, 0xe03da7477a536e9aULL, 0xeb208b60400b2babULL, 0x90ad7aea47f459d7ULL, 0xa4f1aa0eff49b85bULL, 0x1e22786644f0d25aULL, 0x85922eab395ccebcULL, 0x60a09dfd5d27873dULL, 0x0000000000000000ULL, 0x256f94b1de355afbULL, 0xf401f70302f3f2f6ULL, 0xf10ee3121cdbd5edULL, 0x94a16afe5fd475cbULL, 0x0b1d2c273a584531ULL, 0xe734bb5c686b5f8fULL, 0x759fc9bc238f1056ULL, 0xef2c9b74582b07b7ULL, 0x345cd0e4b8bde18cULL, 0x3153c4f5a695c697ULL, 0xd46177a3c2ee8f16ULL, 0xd06d67b7dacea30aULL, 0x869722a43344d3b5ULL, 0x7e82e59b19d75567ULL, 0xadea8e23c901eb64ULL, 0xfd1ad32e34bba1c9ULL, 0x297ba48df6552edfULL, 0x3050c0f0a09dcd90ULL, 0x3b4decd79ac588a1ULL, 0x9fbc46d9658c30faULL, 0xf815c73f2a9386d2ULL, 0xc6573ff9ae7e2968ULL, 0x13354c5f6a98ad79ULL, 0x060a181e14303a12ULL, 0x050f14111e28271bULL, 0xc55233f6a4663461ULL, 0x113344556688bb77ULL, 0x7799c1b62f9f0658ULL, 0x7c84ed9115c74369ULL, 0x7a8ef58f01f7797bULL, 0x7888fd850de76f75ULL, 0x365ad8eeb4adf782ULL, 0x1c24706c48e0c454ULL, 0x394be4dd96d59eafULL, 0x59eb7920cbf21992ULL, 0x1828607850c0e848ULL, 0x56fa4513e98a70bfULL, 0xb3c8f6458df1393eULL, 0xb0cdfa4a87e92437ULL, 0x246c90b4d83d51fcULL, 0x206080a0c01d7de0ULL, 0xb2cbf2408bf93239ULL, 0x92ab72e04be44fd9ULL, 0xa3f8b615ed71894eULL, 0xc05d27e7ba4e137aULL, 0x44cc0d49851ad6c1ULL, 0x62a695f751379133ULL, 0x103040506080b070ULL, 0xb4c1ea5e9fc9082bULL, 0x84912aae3f54c5bbULL, 0x43c511529722e7d4ULL, 0x93a876e54dec44deULL, 0xc25b2fedb65e0574ULL, 0x4ade357fa16ab4ebULL, 0xbddace73a9815b14ULL, 0x8f8c0689050c808aULL, 0x2d77b499ee7502c3ULL, 0xbcd9ca76af895013ULL, 0x9cb94ad66f942df3ULL, 0x6abeb5df6177c90bULL, 0x40c01d5d9d3afaddULL, 0xcf4c1bd498367a57ULL, 0xa2fbb210eb798249ULL, 0x809d3aba2774e9a7ULL, 0x4fd1216ebf4293f0ULL, 0x1f217c6342f8d95dULL, 0xca430fc5861e5d4cULL, 0xaae39238db39da71ULL, 0x42c61557912aecd3ULL }; static const u64 T1[256] = { 0xd3ba68d2b9bb016aULL, 0xfc54194d9ae5b166ULL, 0x712f93bc65e2cd14ULL, 0x9c74b9cd8725511bULL, 0xf5530251a2f7a457ULL, 0x68d3b86bd6d003beULL, 0x6bd2bd6fded604b5ULL, 
0xd74d642952b3fe85ULL, 0xf0500d5dbafdad4aULL, 0xe9ac268a09cf63e0ULL, 0x8a8d830e1c098496ULL, 0xdcbf79c691a51a4dULL, 0x9070addda73d4d37ULL, 0xf6520755aaf1a35cULL, 0xb39ac852a47be117ULL, 0xd44c612d5ab5f98eULL, 0x23ea658f0346ac20ULL, 0x62d5a673e6c41184ULL, 0xa497f166cc55c268ULL, 0x6ed1b263c6dc0da8ULL, 0x5533ffcc85aa99d0ULL, 0xf3510859b2fbaa41ULL, 0xed5b2a71e2c79c0fULL, 0xf7a604a259f355aeULL, 0x7fde815fbefe20c1ULL, 0xd848753d7aade5a2ULL, 0xe5a8329a29d77fccULL, 0xb699c75ebc71e80aULL, 0x70db904b96e03be6ULL, 0x5632fac88dac9edbULL, 0xc4b751e6d1952215ULL, 0x19fc2bd7b332ceaaULL, 0x38e348ab4b709373ULL, 0xbf9edc428463fd3bULL, 0xae91ef7efc41d052ULL, 0xb09bcd56ac7de61cULL, 0x3be24daf43769478ULL, 0xd0bb6dd6b1bd0661ULL, 0xc3415819329bdaf1ULL, 0xb26ecba5577917e5ULL, 0xf2a50bae41f95cb3ULL, 0x40cbc00b16804b56ULL, 0xbd6bdab17f670cc2ULL, 0xa295fb6edc59cc7eULL, 0xfea11fbe61e1409fULL, 0x08f318ebcb10e3c3ULL, 0xceb14ffee181302fULL, 0x06020a08100c0e16ULL, 0x49ccdb172e925e67ULL, 0x51c4f3376ea2663fULL, 0x271d6974e84e53cfULL, 0x3c144450a0786c9cULL, 0x58c3e82b56b0730eULL, 0xa563f2913f57349aULL, 0x73da954f9ee63cedULL, 0xe75d3469d2d38e35ULL, 0xe15f3e61c2df8023ULL, 0x79dc8b57aef22ed7ULL, 0x877d94e9cf136e48ULL, 0x4acdde132694596cULL, 0x817f9ee1df1f605eULL, 0xee5a2f75eac19b04ULL, 0xb46cc1ad477519f3ULL, 0xe45c316ddad5893eULL, 0x04f70cfbeb08ffefULL, 0x6a26be982dd4f247ULL, 0x1cff24dbab38c7b7ULL, 0x2aed7e933b54b911ULL, 0x25e86f87134aa236ULL, 0xba9dd34e9c69f426ULL, 0xb16fcea15f7f10eeULL, 0x8f8e8c0204038d8bULL, 0x2b197d64c8564fe3ULL, 0xfda01aba69e74794ULL, 0x0df017e7d31aeadeULL, 0x8689971e3c1198baULL, 0x110f333c78222d69ULL, 0x09071b1c38121531ULL, 0xecaf298611c56afdULL, 0x10fb30cb8b20db9bULL, 0x1808282040303858ULL, 0x3f154154a87e6b97ULL, 0x170d3934682e237fULL, 0x0c04141020181c2cULL, 0x030105040806070bULL, 0xac64e98d074521abULL, 0x7cdf845bb6f827caULL, 0x9a76b3c597295f0dULL, 0x8b7980f9ef0b7264ULL, 0x7add8e53a6f429dcULL, 0x473dc9f4f58eb3b2ULL, 0x3a164e58b074628aULL, 0x413fc3fce582bda4ULL, 0x5937ebdca5b285fcULL, 0xb76dc4a94f731ef8ULL, 0x4838d8e0dd90a895ULL, 0xd6b967dea1b10877ULL, 0x9573a2d1bf37442aULL, 0x26e96a831b4ca53dULL, 0x5f35e1d4b5be8beaULL, 0xff551c4992e3b66dULL, 0x9371a8d9af3b4a3cULL, 0x8d7b8af1ff077c72ULL, 0x898c860a140f839dULL, 0x9672a7d5b7314321ULL, 0x8588921a34179fb1ULL, 0x07f609ffe30ef8e4ULL, 0x7e2a82a84dfcd633ULL, 0x423ec6f8ed84baafULL, 0xe25e3b65cad98728ULL, 0x6927bb9c25d2f54cULL, 0xca4643050a89cfc0ULL, 0x140c3c3060282474ULL, 0xaf65ec890f4326a0ULL, 0xb868d5bd676d05dfULL, 0xa361f8992f5b3a8cULL, 0x05030f0c180a091dULL, 0x5ec1e22346bc7d18ULL, 0xf957164182efb87bULL, 0x67d6a97ffece1899ULL, 0x76d99a4386ec35f0ULL, 0xe858257dfacd9512ULL, 0x75d89f478eea32fbULL, 0xaa66e38517492fbdULL, 0x64d7ac7bf6c81f92ULL, 0x4e3ad2e8cd9ca683ULL, 0x45c8cf070e8a424bULL, 0x443cccf0fd88b4b9ULL, 0x13fa35cf8326dc90ULL, 0xa796f462c453c563ULL, 0xf4a701a651f552a5ULL, 0xb598c25ab477ef01ULL, 0x29ec7b973352be1aULL, 0xd5b862daa9b70f7cULL, 0x54c7fc3b76a86f22ULL, 0xefae2c8219c36df6ULL, 0xbb69d0b96f6b02d4ULL, 0xdd4b7a3162a7ecbfULL, 0xe0ab3d9631dd76d1ULL, 0xe6a9379e21d178c7ULL, 0xa967e6811f4f28b6ULL, 0x1e0a2228503c364eULL, 0xc9474601028fc8cbULL, 0x0bf21defc316e4c8ULL, 0xc2b55beec1992c03ULL, 0x6622aa880dccee6bULL, 0x32e556b37b648149ULL, 0x2fee719f235eb00cULL, 0xdfbe7cc299a31d46ULL, 0x7d2b87ac45fad138ULL, 0x9e81bf3e7c21a0e2ULL, 0x36125a48906c7ea6ULL, 0x9883b5366c2daef4ULL, 0x2d1b776cd85a41f5ULL, 0x120e363870242a62ULL, 0x6523af8c05cae960ULL, 0x02f506f3fb04f1f9ULL, 0xcf454c091283c6ddULL, 0x6321a58415c6e776ULL, 0x4fced11f3e9e5071ULL, 0xdb49703972abe2a9ULL, 
0x742c9cb07de8c409ULL, 0x16f93ac39b2cd58dULL, 0x37e659bf636e8854ULL, 0xc7b654e2d993251eULL, 0x782888a05df0d825ULL, 0x39174b5cb8726581ULL, 0x9b82b032642ba9ffULL, 0x2e1a7268d05c46feULL, 0x808b9d162c1d96acULL, 0x1ffe21dfa33ec0bcULL, 0x838a9812241b91a7ULL, 0x1b092d2448363f53ULL, 0x46c9ca03068c4540ULL, 0x9487a1264c35b2d8ULL, 0xd24e6b254ab9f798ULL, 0x3ee142a35b7c9d65ULL, 0x722e96b86de4ca1fULL, 0x31e453b773628642ULL, 0x3de047a7537a9a6eULL, 0x20eb608b0b40ab2bULL, 0xad90ea7af447d759ULL, 0xf1a40eaa49ff5bb8ULL, 0x221e6678f0445ad2ULL, 0x9285ab2e5c39bcceULL, 0xa060fd9d275d3d87ULL, 0x0000000000000000ULL, 0x6f25b19435defb5aULL, 0x01f403f7f302f6f2ULL, 0x0ef112e3db1cedd5ULL, 0xa194fe6ad45fcb75ULL, 0x1d0b272c583a3145ULL, 0x34e75cbb6b688f5fULL, 0x9f75bcc98f235610ULL, 0x2cef749b2b58b707ULL, 0x5c34e4d0bdb88ce1ULL, 0x5331f5c495a697c6ULL, 0x61d4a377eec2168fULL, 0x6dd0b767ceda0aa3ULL, 0x9786a4224433b5d3ULL, 0x827e9be5d7196755ULL, 0xeaad238e01c964ebULL, 0x1afd2ed3bb34c9a1ULL, 0x7b298da455f6df2eULL, 0x5030f0c09da090cdULL, 0x4d3bd7ecc59aa188ULL, 0xbc9fd9468c65fa30ULL, 0x15f83fc7932ad286ULL, 0x57c6f93f7eae6829ULL, 0x35135f4c986a79adULL, 0x0a061e183014123aULL, 0x0f051114281e1b27ULL, 0x52c5f63366a46134ULL, 0x33115544886677bbULL, 0x9977b6c19f2f5806ULL, 0x847c91edc7156943ULL, 0x8e7a8ff5f7017b79ULL, 0x887885fde70d756fULL, 0x5a36eed8adb482f7ULL, 0x241c6c70e04854c4ULL, 0x4b39dde4d596af9eULL, 0xeb592079f2cb9219ULL, 0x28187860c05048e8ULL, 0xfa5613458ae9bf70ULL, 0xc8b345f6f18d3e39ULL, 0xcdb04afae9873724ULL, 0x6c24b4903dd8fc51ULL, 0x6020a0801dc0e07dULL, 0xcbb240f2f98b3932ULL, 0xab92e072e44bd94fULL, 0xf8a315b671ed4e89ULL, 0x5dc0e7274eba7a13ULL, 0xcc44490d1a85c1d6ULL, 0xa662f79537513391ULL, 0x30105040806070b0ULL, 0xc1b45eeac99f2b08ULL, 0x9184ae2a543fbbc5ULL, 0xc54352112297d4e7ULL, 0xa893e576ec4dde44ULL, 0x5bc2ed2f5eb67405ULL, 0xde4a7f356aa1ebb4ULL, 0xdabd73ce81a9145bULL, 0x8c8f89060c058a80ULL, 0x772d99b475eec302ULL, 0xd9bc76ca89af1350ULL, 0xb99cd64a946ff32dULL, 0xbe6adfb577610bc9ULL, 0xc0405d1d3a9dddfaULL, 0x4ccfd41b3698577aULL, 0xfba210b279eb4982ULL, 0x9d80ba3a7427a7e9ULL, 0xd14f6e2142bff093ULL, 0x211f637cf8425dd9ULL, 0x43cac50f1e864c5dULL, 0xe3aa389239db71daULL, 0xc64257152a91d3ecULL }; static const u64 T2[256] = { 0xd268bad36a01bbb9ULL, 0x4d1954fc66b1e59aULL, 0xbc932f7114cde265ULL, 0xcdb9749c1b512587ULL, 0x510253f557a4f7a2ULL, 0x6bb8d368be03d0d6ULL, 0x6fbdd26bb504d6deULL, 0x29644dd785feb352ULL, 0x5d0d50f04aadfdbaULL, 0x8a26ace9e063cf09ULL, 0x0e838d8a9684091cULL, 0xc679bfdc4d1aa591ULL, 0xddad7090374d3da7ULL, 0x550752f65ca3f1aaULL, 0x52c89ab317e17ba4ULL, 0x2d614cd48ef9b55aULL, 0x8f65ea2320ac4603ULL, 0x73a6d5628411c4e6ULL, 0x66f197a468c255ccULL, 0x63b2d16ea80ddcc6ULL, 0xccff3355d099aa85ULL, 0x590851f341aafbb2ULL, 0x712a5bed0f9cc7e2ULL, 0xa204a6f7ae55f359ULL, 0x5f81de7fc120febeULL, 0x3d7548d8a2e5ad7aULL, 0x9a32a8e5cc7fd729ULL, 0x5ec799b60ae871bcULL, 0x4b90db70e63be096ULL, 0xc8fa3256db9eac8dULL, 0xe651b7c4152295d1ULL, 0xd72bfc19aace32b3ULL, 0xab48e3387393704bULL, 0x42dc9ebf3bfd6384ULL, 0x7eef91ae52d041fcULL, 0x56cd9bb01ce67dacULL, 0xaf4de23b78947643ULL, 0xd66dbbd06106bdb1ULL, 0x195841c3f1da9b32ULL, 0xa5cb6eb2e5177957ULL, 0xae0ba5f2b35cf941ULL, 0x0bc0cb40564b8016ULL, 0xb1da6bbdc20c677fULL, 0x6efb95a27ecc59dcULL, 0xbe1fa1fe9f40e161ULL, 0xeb18f308c3e310cbULL, 0xfe4fb1ce2f3081e1ULL, 0x080a0206160e0c10ULL, 0x17dbcc49675e922eULL, 0x37f3c4513f66a26eULL, 0x74691d27cf534ee8ULL, 0x5044143c9c6c78a0ULL, 0x2be8c3580e73b056ULL, 0x91f263a59a34573fULL, 0x4f95da73ed3ce69eULL, 0x69345de7358ed3d2ULL, 0x613e5fe12380dfc2ULL, 0x578bdc79d72ef2aeULL, 
0xe9947d87486e13cfULL, 0x13decd4a6c599426ULL, 0xe19e7f815e601fdfULL, 0x752f5aee049bc1eaULL, 0xadc16cb4f3197547ULL, 0x6d315ce43e89d5daULL, 0xfb0cf704efff08ebULL, 0x98be266a47f2d42dULL, 0xdb24ff1cb7c738abULL, 0x937eed2a11b9543bULL, 0x876fe82536a24a13ULL, 0x4ed39dba26f4699cULL, 0xa1ce6fb1ee107f5fULL, 0x028c8e8f8b8d0304ULL, 0x647d192be34f56c8ULL, 0xba1aa0fd9447e769ULL, 0xe717f00ddeea1ad3ULL, 0x1e978986ba98113cULL, 0x3c330f11692d2278ULL, 0x1c1b070931151238ULL, 0x8629afecfd6ac511ULL, 0xcb30fb109bdb208bULL, 0x2028081858383040ULL, 0x5441153f976b7ea8ULL, 0x34390d177f232e68ULL, 0x1014040c2c1c1820ULL, 0x040501030b070608ULL, 0x8de964acab214507ULL, 0x5b84df7cca27f8b6ULL, 0xc5b3769a0d5f2997ULL, 0xf980798b64720befULL, 0x538edd7adc29f4a6ULL, 0xf4c93d47b2b38ef5ULL, 0x584e163a8a6274b0ULL, 0xfcc33f41a4bd82e5ULL, 0xdceb3759fc85b2a5ULL, 0xa9c46db7f81e734fULL, 0xe0d8384895a890ddULL, 0xde67b9d67708b1a1ULL, 0xd1a273952a4437bfULL, 0x836ae9263da54c1bULL, 0xd4e1355fea8bbeb5ULL, 0x491c55ff6db6e392ULL, 0xd9a871933c4a3bafULL, 0xf18a7b8d727c07ffULL, 0x0a868c899d830f14ULL, 0xd5a77296214331b7ULL, 0x1a928885b19f1734ULL, 0xff09f607e4f80ee3ULL, 0xa8822a7e33d6fc4dULL, 0xf8c63e42afba84edULL, 0x653b5ee22887d9caULL, 0x9cbb27694cf5d225ULL, 0x054346cac0cf890aULL, 0x303c0c1474242860ULL, 0x89ec65afa026430fULL, 0xbdd568b8df056d67ULL, 0x99f861a38c3a5b2fULL, 0x0c0f03051d090a18ULL, 0x23e2c15e187dbc46ULL, 0x411657f97bb8ef82ULL, 0x7fa9d6679918cefeULL, 0x439ad976f035ec86ULL, 0x7d2558e81295cdfaULL, 0x479fd875fb32ea8eULL, 0x85e366aabd2f4917ULL, 0x7bacd764921fc8f6ULL, 0xe8d23a4e83a69ccdULL, 0x07cfc8454b428a0eULL, 0xf0cc3c44b9b488fdULL, 0xcf35fa1390dc2683ULL, 0x62f496a763c553c4ULL, 0xa601a7f4a552f551ULL, 0x5ac298b501ef77b4ULL, 0x977bec291abe5233ULL, 0xda62b8d57c0fb7a9ULL, 0x3bfcc754226fa876ULL, 0x822caeeff66dc319ULL, 0xb9d069bbd4026b6fULL, 0x317a4bddbfeca762ULL, 0x963dabe0d176dd31ULL, 0x9e37a9e6c778d121ULL, 0x81e667a9b6284f1fULL, 0x28220a1e4e363c50ULL, 0x014647c9cbc88f02ULL, 0xef1df20bc8e416c3ULL, 0xee5bb5c2032c99c1ULL, 0x88aa22666beecc0dULL, 0xb356e5324981647bULL, 0x9f71ee2f0cb05e23ULL, 0xc27cbedf461da399ULL, 0xac872b7d38d1fa45ULL, 0x3ebf819ee2a0217cULL, 0x485a1236a67e6c90ULL, 0x36b58398f4ae2d6cULL, 0x6c771b2df5415ad8ULL, 0x38360e12622a2470ULL, 0x8caf236560e9ca05ULL, 0xf306f502f9f104fbULL, 0x094c45cfddc68312ULL, 0x84a5216376e7c615ULL, 0x1fd1ce4f71509e3eULL, 0x397049dba9e2ab72ULL, 0xb09c2c7409c4e87dULL, 0xc33af9168dd52c9bULL, 0xbf59e63754886e63ULL, 0xe254b6c71e2593d9ULL, 0xa088287825d8f05dULL, 0x5c4b1739816572b8ULL, 0x32b0829bffa92b64ULL, 0x68721a2efe465cd0ULL, 0x169d8b80ac961d2cULL, 0xdf21fe1fbcc03ea3ULL, 0x12988a83a7911b24ULL, 0x242d091b533f3648ULL, 0x03cac94640458c06ULL, 0x26a18794d8b2354cULL, 0x256b4ed298f7b94aULL, 0xa342e13e659d7c5bULL, 0xb8962e721fcae46dULL, 0xb753e43142866273ULL, 0xa747e03d6e9a7a53ULL, 0x8b60eb202bab400bULL, 0x7aea90ad59d747f4ULL, 0xaa0ea4f1b85bff49ULL, 0x78661e22d25a44f0ULL, 0x2eab8592cebc395cULL, 0x9dfd60a0873d5d27ULL, 0x0000000000000000ULL, 0x94b1256f5afbde35ULL, 0xf703f401f2f602f3ULL, 0xe312f10ed5ed1cdbULL, 0x6afe94a175cb5fd4ULL, 0x2c270b1d45313a58ULL, 0xbb5ce7345f8f686bULL, 0xc9bc759f1056238fULL, 0x9b74ef2c07b7582bULL, 0xd0e4345ce18cb8bdULL, 0xc4f53153c697a695ULL, 0x77a3d4618f16c2eeULL, 0x67b7d06da30adaceULL, 0x22a48697d3b53344ULL, 0xe59b7e82556719d7ULL, 0x8e23adeaeb64c901ULL, 0xd32efd1aa1c934bbULL, 0xa48d297b2edff655ULL, 0xc0f03050cd90a09dULL, 0xecd73b4d88a19ac5ULL, 0x46d99fbc30fa658cULL, 0xc73ff81586d22a93ULL, 0x3ff9c6572968ae7eULL, 0x4c5f1335ad796a98ULL, 0x181e060a3a121430ULL, 0x1411050f271b1e28ULL, 
0x33f6c5523461a466ULL, 0x44551133bb776688ULL, 0xc1b6779906582f9fULL, 0xed917c84436915c7ULL, 0xf58f7a8e797b01f7ULL, 0xfd8578886f750de7ULL, 0xd8ee365af782b4adULL, 0x706c1c24c45448e0ULL, 0xe4dd394b9eaf96d5ULL, 0x792059eb1992cbf2ULL, 0x60781828e84850c0ULL, 0x451356fa70bfe98aULL, 0xf645b3c8393e8df1ULL, 0xfa4ab0cd243787e9ULL, 0x90b4246c51fcd83dULL, 0x80a020607de0c01dULL, 0xf240b2cb32398bf9ULL, 0x72e092ab4fd94be4ULL, 0xb615a3f8894eed71ULL, 0x27e7c05d137aba4eULL, 0x0d4944ccd6c1851aULL, 0x95f762a691335137ULL, 0x40501030b0706080ULL, 0xea5eb4c1082b9fc9ULL, 0x2aae8491c5bb3f54ULL, 0x115243c5e7d49722ULL, 0x76e593a844de4decULL, 0x2fedc25b0574b65eULL, 0x357f4adeb4eba16aULL, 0xce73bdda5b14a981ULL, 0x06898f8c808a050cULL, 0xb4992d7702c3ee75ULL, 0xca76bcd95013af89ULL, 0x4ad69cb92df36f94ULL, 0xb5df6abec90b6177ULL, 0x1d5d40c0fadd9d3aULL, 0x1bd4cf4c7a579836ULL, 0xb210a2fb8249eb79ULL, 0x3aba809de9a72774ULL, 0x216e4fd193f0bf42ULL, 0x7c631f21d95d42f8ULL, 0x0fc5ca435d4c861eULL, 0x9238aae3da71db39ULL, 0x155742c6ecd3912aULL }; static const u64 T3[256] = { 0x68d2d3ba016ab9bbULL, 0x194dfc54b1669ae5ULL, 0x93bc712fcd1465e2ULL, 0xb9cd9c74511b8725ULL, 0x0251f553a457a2f7ULL, 0xb86b68d303bed6d0ULL, 0xbd6f6bd204b5ded6ULL, 0x6429d74dfe8552b3ULL, 0x0d5df050ad4abafdULL, 0x268ae9ac63e009cfULL, 0x830e8a8d84961c09ULL, 0x79c6dcbf1a4d91a5ULL, 0xaddd90704d37a73dULL, 0x0755f652a35caaf1ULL, 0xc852b39ae117a47bULL, 0x612dd44cf98e5ab5ULL, 0x658f23eaac200346ULL, 0xa67362d51184e6c4ULL, 0xf166a497c268cc55ULL, 0xb2636ed10da8c6dcULL, 0xffcc553399d085aaULL, 0x0859f351aa41b2fbULL, 0x2a71ed5b9c0fe2c7ULL, 0x04a2f7a655ae59f3ULL, 0x815f7fde20c1befeULL, 0x753dd848e5a27aadULL, 0x329ae5a87fcc29d7ULL, 0xc75eb699e80abc71ULL, 0x904b70db3be696e0ULL, 0xfac856329edb8dacULL, 0x51e6c4b72215d195ULL, 0x2bd719fcceaab332ULL, 0x48ab38e393734b70ULL, 0xdc42bf9efd3b8463ULL, 0xef7eae91d052fc41ULL, 0xcd56b09be61cac7dULL, 0x4daf3be294784376ULL, 0x6dd6d0bb0661b1bdULL, 0x5819c341daf1329bULL, 0xcba5b26e17e55779ULL, 0x0baef2a55cb341f9ULL, 0xc00b40cb4b561680ULL, 0xdab1bd6b0cc27f67ULL, 0xfb6ea295cc7edc59ULL, 0x1fbefea1409f61e1ULL, 0x18eb08f3e3c3cb10ULL, 0x4ffeceb1302fe181ULL, 0x0a0806020e16100cULL, 0xdb1749cc5e672e92ULL, 0xf33751c4663f6ea2ULL, 0x6974271d53cfe84eULL, 0x44503c146c9ca078ULL, 0xe82b58c3730e56b0ULL, 0xf291a563349a3f57ULL, 0x954f73da3ced9ee6ULL, 0x3469e75d8e35d2d3ULL, 0x3e61e15f8023c2dfULL, 0x8b5779dc2ed7aef2ULL, 0x94e9877d6e48cf13ULL, 0xde134acd596c2694ULL, 0x9ee1817f605edf1fULL, 0x2f75ee5a9b04eac1ULL, 0xc1adb46c19f34775ULL, 0x316de45c893edad5ULL, 0x0cfb04f7ffefeb08ULL, 0xbe986a26f2472dd4ULL, 0x24db1cffc7b7ab38ULL, 0x7e932aedb9113b54ULL, 0x6f8725e8a236134aULL, 0xd34eba9df4269c69ULL, 0xcea1b16f10ee5f7fULL, 0x8c028f8e8d8b0403ULL, 0x7d642b194fe3c856ULL, 0x1abafda0479469e7ULL, 0x17e70df0eaded31aULL, 0x971e868998ba3c11ULL, 0x333c110f2d697822ULL, 0x1b1c090715313812ULL, 0x2986ecaf6afd11c5ULL, 0x30cb10fbdb9b8b20ULL, 0x2820180838584030ULL, 0x41543f156b97a87eULL, 0x3934170d237f682eULL, 0x14100c041c2c2018ULL, 0x05040301070b0806ULL, 0xe98dac6421ab0745ULL, 0x845b7cdf27cab6f8ULL, 0xb3c59a765f0d9729ULL, 0x80f98b797264ef0bULL, 0x8e537add29dca6f4ULL, 0xc9f4473db3b2f58eULL, 0x4e583a16628ab074ULL, 0xc3fc413fbda4e582ULL, 0xebdc593785fca5b2ULL, 0xc4a9b76d1ef84f73ULL, 0xd8e04838a895dd90ULL, 0x67ded6b90877a1b1ULL, 0xa2d19573442abf37ULL, 0x6a8326e9a53d1b4cULL, 0xe1d45f358beab5beULL, 0x1c49ff55b66d92e3ULL, 0xa8d993714a3caf3bULL, 0x8af18d7b7c72ff07ULL, 0x860a898c839d140fULL, 0xa7d596724321b731ULL, 0x921a85889fb13417ULL, 0x09ff07f6f8e4e30eULL, 0x82a87e2ad6334dfcULL, 0xc6f8423ebaafed84ULL, 
0x3b65e25e8728cad9ULL, 0xbb9c6927f54c25d2ULL, 0x4305ca46cfc00a89ULL, 0x3c30140c24746028ULL, 0xec89af6526a00f43ULL, 0xd5bdb86805df676dULL, 0xf899a3613a8c2f5bULL, 0x0f0c0503091d180aULL, 0xe2235ec17d1846bcULL, 0x1641f957b87b82efULL, 0xa97f67d61899feceULL, 0x9a4376d935f086ecULL, 0x257de8589512facdULL, 0x9f4775d832fb8eeaULL, 0xe385aa662fbd1749ULL, 0xac7b64d71f92f6c8ULL, 0xd2e84e3aa683cd9cULL, 0xcf0745c8424b0e8aULL, 0xccf0443cb4b9fd88ULL, 0x35cf13fadc908326ULL, 0xf462a796c563c453ULL, 0x01a6f4a752a551f5ULL, 0xc25ab598ef01b477ULL, 0x7b9729ecbe1a3352ULL, 0x62dad5b80f7ca9b7ULL, 0xfc3b54c76f2276a8ULL, 0x2c82efae6df619c3ULL, 0xd0b9bb6902d46f6bULL, 0x7a31dd4becbf62a7ULL, 0x3d96e0ab76d131ddULL, 0x379ee6a978c721d1ULL, 0xe681a96728b61f4fULL, 0x22281e0a364e503cULL, 0x4601c947c8cb028fULL, 0x1def0bf2e4c8c316ULL, 0x5beec2b52c03c199ULL, 0xaa886622ee6b0dccULL, 0x56b332e581497b64ULL, 0x719f2feeb00c235eULL, 0x7cc2dfbe1d4699a3ULL, 0x87ac7d2bd13845faULL, 0xbf3e9e81a0e27c21ULL, 0x5a4836127ea6906cULL, 0xb5369883aef46c2dULL, 0x776c2d1b41f5d85aULL, 0x3638120e2a627024ULL, 0xaf8c6523e96005caULL, 0x06f302f5f1f9fb04ULL, 0x4c09cf45c6dd1283ULL, 0xa5846321e77615c6ULL, 0xd11f4fce50713e9eULL, 0x7039db49e2a972abULL, 0x9cb0742cc4097de8ULL, 0x3ac316f9d58d9b2cULL, 0x59bf37e68854636eULL, 0x54e2c7b6251ed993ULL, 0x88a07828d8255df0ULL, 0x4b5c39176581b872ULL, 0xb0329b82a9ff642bULL, 0x72682e1a46fed05cULL, 0x9d16808b96ac2c1dULL, 0x21df1ffec0bca33eULL, 0x9812838a91a7241bULL, 0x2d241b093f534836ULL, 0xca0346c94540068cULL, 0xa1269487b2d84c35ULL, 0x6b25d24ef7984ab9ULL, 0x42a33ee19d655b7cULL, 0x96b8722eca1f6de4ULL, 0x53b731e486427362ULL, 0x47a73de09a6e537aULL, 0x608b20ebab2b0b40ULL, 0xea7aad90d759f447ULL, 0x0eaaf1a45bb849ffULL, 0x6678221e5ad2f044ULL, 0xab2e9285bcce5c39ULL, 0xfd9da0603d87275dULL, 0x0000000000000000ULL, 0xb1946f25fb5a35deULL, 0x03f701f4f6f2f302ULL, 0x12e30ef1edd5db1cULL, 0xfe6aa194cb75d45fULL, 0x272c1d0b3145583aULL, 0x5cbb34e78f5f6b68ULL, 0xbcc99f7556108f23ULL, 0x749b2cefb7072b58ULL, 0xe4d05c348ce1bdb8ULL, 0xf5c4533197c695a6ULL, 0xa37761d4168feec2ULL, 0xb7676dd00aa3cedaULL, 0xa4229786b5d34433ULL, 0x9be5827e6755d719ULL, 0x238eeaad64eb01c9ULL, 0x2ed31afdc9a1bb34ULL, 0x8da47b29df2e55f6ULL, 0xf0c0503090cd9da0ULL, 0xd7ec4d3ba188c59aULL, 0xd946bc9ffa308c65ULL, 0x3fc715f8d286932aULL, 0xf93f57c668297eaeULL, 0x5f4c351379ad986aULL, 0x1e180a06123a3014ULL, 0x11140f051b27281eULL, 0xf63352c5613466a4ULL, 0x5544331177bb8866ULL, 0xb6c1997758069f2fULL, 0x91ed847c6943c715ULL, 0x8ff58e7a7b79f701ULL, 0x85fd8878756fe70dULL, 0xeed85a3682f7adb4ULL, 0x6c70241c54c4e048ULL, 0xdde44b39af9ed596ULL, 0x2079eb599219f2cbULL, 0x7860281848e8c050ULL, 0x1345fa56bf708ae9ULL, 0x45f6c8b33e39f18dULL, 0x4afacdb03724e987ULL, 0xb4906c24fc513dd8ULL, 0xa0806020e07d1dc0ULL, 0x40f2cbb23932f98bULL, 0xe072ab92d94fe44bULL, 0x15b6f8a34e8971edULL, 0xe7275dc07a134ebaULL, 0x490dcc44c1d61a85ULL, 0xf795a66233913751ULL, 0x5040301070b08060ULL, 0x5eeac1b42b08c99fULL, 0xae2a9184bbc5543fULL, 0x5211c543d4e72297ULL, 0xe576a893de44ec4dULL, 0xed2f5bc274055eb6ULL, 0x7f35de4aebb46aa1ULL, 0x73cedabd145b81a9ULL, 0x89068c8f8a800c05ULL, 0x99b4772dc30275eeULL, 0x76cad9bc135089afULL, 0xd64ab99cf32d946fULL, 0xdfb5be6a0bc97761ULL, 0x5d1dc040ddfa3a9dULL, 0xd41b4ccf577a3698ULL, 0x10b2fba2498279ebULL, 0xba3a9d80a7e97427ULL, 0x6e21d14ff09342bfULL, 0x637c211f5dd9f842ULL, 0xc50f43ca4c5d1e86ULL, 0x3892e3aa71da39dbULL, 0x5715c642d3ec2a91ULL }; static const u64 T4[256] = { 0xbbb96a01bad3d268ULL, 0xe59a66b154fc4d19ULL, 0xe26514cd2f71bc93ULL, 0x25871b51749ccdb9ULL, 0xf7a257a453f55102ULL, 0xd0d6be03d3686bb8ULL, 
0xd6deb504d26b6fbdULL, 0xb35285fe4dd72964ULL, 0xfdba4aad50f05d0dULL, 0xcf09e063ace98a26ULL, 0x091c96848d8a0e83ULL, 0xa5914d1abfdcc679ULL, 0x3da7374d7090ddadULL, 0xf1aa5ca352f65507ULL, 0x7ba417e19ab352c8ULL, 0xb55a8ef94cd42d61ULL, 0x460320acea238f65ULL, 0xc4e68411d56273a6ULL, 0x55cc68c297a466f1ULL, 0xdcc6a80dd16e63b2ULL, 0xaa85d0993355ccffULL, 0xfbb241aa51f35908ULL, 0xc7e20f9c5bed712aULL, 0xf359ae55a6f7a204ULL, 0xfebec120de7f5f81ULL, 0xad7aa2e548d83d75ULL, 0xd729cc7fa8e59a32ULL, 0x71bc0ae899b65ec7ULL, 0xe096e63bdb704b90ULL, 0xac8ddb9e3256c8faULL, 0x95d11522b7c4e651ULL, 0x32b3aacefc19d72bULL, 0x704b7393e338ab48ULL, 0x63843bfd9ebf42dcULL, 0x41fc52d091ae7eefULL, 0x7dac1ce69bb056cdULL, 0x76437894e23baf4dULL, 0xbdb16106bbd0d66dULL, 0x9b32f1da41c31958ULL, 0x7957e5176eb2a5cbULL, 0xf941b35ca5f2ae0bULL, 0x8016564bcb400bc0ULL, 0x677fc20c6bbdb1daULL, 0x59dc7ecc95a26efbULL, 0xe1619f40a1febe1fULL, 0x10cbc3e3f308eb18ULL, 0x81e12f30b1cefe4fULL, 0x0c10160e0206080aULL, 0x922e675ecc4917dbULL, 0xa26e3f66c45137f3ULL, 0x4ee8cf531d277469ULL, 0x78a09c6c143c5044ULL, 0xb0560e73c3582be8ULL, 0x573f9a3463a591f2ULL, 0xe69eed3cda734f95ULL, 0xd3d2358e5de76934ULL, 0xdfc223805fe1613eULL, 0xf2aed72edc79578bULL, 0x13cf486e7d87e994ULL, 0x94266c59cd4a13deULL, 0x1fdf5e607f81e19eULL, 0xc1ea049b5aee752fULL, 0x7547f3196cb4adc1ULL, 0xd5da3e895ce46d31ULL, 0x08ebeffff704fb0cULL, 0xd42d47f2266a98beULL, 0x38abb7c7ff1cdb24ULL, 0x543b11b9ed2a937eULL, 0x4a1336a2e825876fULL, 0x699c26f49dba4ed3ULL, 0x7f5fee106fb1a1ceULL, 0x03048b8d8e8f028cULL, 0x56c8e34f192b647dULL, 0xe7699447a0fdba1aULL, 0x1ad3deeaf00de717ULL, 0x113cba9889861e97ULL, 0x2278692d0f113c33ULL, 0x1238311507091c1bULL, 0xc511fd6aafec8629ULL, 0x208b9bdbfb10cb30ULL, 0x3040583808182028ULL, 0x7ea8976b153f5441ULL, 0x2e687f230d173439ULL, 0x18202c1c040c1014ULL, 0x06080b0701030405ULL, 0x4507ab2164ac8de9ULL, 0xf8b6ca27df7c5b84ULL, 0x29970d5f769ac5b3ULL, 0x0bef6472798bf980ULL, 0xf4a6dc29dd7a538eULL, 0x8ef5b2b33d47f4c9ULL, 0x74b08a62163a584eULL, 0x82e5a4bd3f41fcc3ULL, 0xb2a5fc853759dcebULL, 0x734ff81e6db7a9c4ULL, 0x90dd95a83848e0d8ULL, 0xb1a17708b9d6de67ULL, 0x37bf2a447395d1a2ULL, 0x4c1b3da5e926836aULL, 0xbeb5ea8b355fd4e1ULL, 0xe3926db655ff491cULL, 0x3baf3c4a7193d9a8ULL, 0x07ff727c7b8df18aULL, 0x0f149d838c890a86ULL, 0x31b721437296d5a7ULL, 0x1734b19f88851a92ULL, 0x0ee3e4f8f607ff09ULL, 0xfc4d33d62a7ea882ULL, 0x84edafba3e42f8c6ULL, 0xd9ca28875ee2653bULL, 0xd2254cf527699cbbULL, 0x890ac0cf46ca0543ULL, 0x286074240c14303cULL, 0x430fa02665af89ecULL, 0x6d67df0568b8bdd5ULL, 0x5b2f8c3a61a399f8ULL, 0x0a181d0903050c0fULL, 0xbc46187dc15e23e2ULL, 0xef827bb857f94116ULL, 0xcefe9918d6677fa9ULL, 0xec86f035d976439aULL, 0xcdfa129558e87d25ULL, 0xea8efb32d875479fULL, 0x4917bd2f66aa85e3ULL, 0xc8f6921fd7647bacULL, 0x9ccd83a63a4ee8d2ULL, 0x8a0e4b42c84507cfULL, 0x88fdb9b43c44f0ccULL, 0x268390dcfa13cf35ULL, 0x53c463c596a762f4ULL, 0xf551a552a7f4a601ULL, 0x77b401ef98b55ac2ULL, 0x52331abeec29977bULL, 0xb7a97c0fb8d5da62ULL, 0xa876226fc7543bfcULL, 0xc319f66daeef822cULL, 0x6b6fd40269bbb9d0ULL, 0xa762bfec4bdd317aULL, 0xdd31d176abe0963dULL, 0xd121c778a9e69e37ULL, 0x4f1fb62867a981e6ULL, 0x3c504e360a1e2822ULL, 0x8f02cbc847c90146ULL, 0x16c3c8e4f20bef1dULL, 0x99c1032cb5c2ee5bULL, 0xcc0d6bee226688aaULL, 0x647b4981e532b356ULL, 0x5e230cb0ee2f9f71ULL, 0xa399461dbedfc27cULL, 0xfa4538d12b7dac87ULL, 0x217ce2a0819e3ebfULL, 0x6c90a67e1236485aULL, 0x2d6cf4ae839836b5ULL, 0x5ad8f5411b2d6c77ULL, 0x2470622a0e123836ULL, 0xca0560e923658cafULL, 0x04fbf9f1f502f306ULL, 0x8312ddc645cf094cULL, 0xc61576e7216384a5ULL, 0x9e3e7150ce4f1fd1ULL, 
0xab72a9e249db3970ULL, 0xe87d09c42c74b09cULL, 0x2c9b8dd5f916c33aULL, 0x6e635488e637bf59ULL, 0x93d91e25b6c7e254ULL, 0xf05d25d82878a088ULL, 0x72b8816517395c4bULL, 0x2b64ffa9829b32b0ULL, 0x5cd0fe461a2e6872ULL, 0x1d2cac968b80169dULL, 0x3ea3bcc0fe1fdf21ULL, 0x1b24a7918a831298ULL, 0x3648533f091b242dULL, 0x8c064045c94603caULL, 0x354cd8b2879426a1ULL, 0xb94a98f74ed2256bULL, 0x7c5b659de13ea342ULL, 0xe46d1fca2e72b896ULL, 0x62734286e431b753ULL, 0x7a536e9ae03da747ULL, 0x400b2babeb208b60ULL, 0x47f459d790ad7aeaULL, 0xff49b85ba4f1aa0eULL, 0x44f0d25a1e227866ULL, 0x395ccebc85922eabULL, 0x5d27873d60a09dfdULL, 0x0000000000000000ULL, 0xde355afb256f94b1ULL, 0x02f3f2f6f401f703ULL, 0x1cdbd5edf10ee312ULL, 0x5fd475cb94a16afeULL, 0x3a5845310b1d2c27ULL, 0x686b5f8fe734bb5cULL, 0x238f1056759fc9bcULL, 0x582b07b7ef2c9b74ULL, 0xb8bde18c345cd0e4ULL, 0xa695c6973153c4f5ULL, 0xc2ee8f16d46177a3ULL, 0xdacea30ad06d67b7ULL, 0x3344d3b5869722a4ULL, 0x19d755677e82e59bULL, 0xc901eb64adea8e23ULL, 0x34bba1c9fd1ad32eULL, 0xf6552edf297ba48dULL, 0xa09dcd903050c0f0ULL, 0x9ac588a13b4decd7ULL, 0x658c30fa9fbc46d9ULL, 0x2a9386d2f815c73fULL, 0xae7e2968c6573ff9ULL, 0x6a98ad7913354c5fULL, 0x14303a12060a181eULL, 0x1e28271b050f1411ULL, 0xa4663461c55233f6ULL, 0x6688bb7711334455ULL, 0x2f9f06587799c1b6ULL, 0x15c743697c84ed91ULL, 0x01f7797b7a8ef58fULL, 0x0de76f757888fd85ULL, 0xb4adf782365ad8eeULL, 0x48e0c4541c24706cULL, 0x96d59eaf394be4ddULL, 0xcbf2199259eb7920ULL, 0x50c0e84818286078ULL, 0xe98a70bf56fa4513ULL, 0x8df1393eb3c8f645ULL, 0x87e92437b0cdfa4aULL, 0xd83d51fc246c90b4ULL, 0xc01d7de0206080a0ULL, 0x8bf93239b2cbf240ULL, 0x4be44fd992ab72e0ULL, 0xed71894ea3f8b615ULL, 0xba4e137ac05d27e7ULL, 0x851ad6c144cc0d49ULL, 0x5137913362a695f7ULL, 0x6080b07010304050ULL, 0x9fc9082bb4c1ea5eULL, 0x3f54c5bb84912aaeULL, 0x9722e7d443c51152ULL, 0x4dec44de93a876e5ULL, 0xb65e0574c25b2fedULL, 0xa16ab4eb4ade357fULL, 0xa9815b14bddace73ULL, 0x050c808a8f8c0689ULL, 0xee7502c32d77b499ULL, 0xaf895013bcd9ca76ULL, 0x6f942df39cb94ad6ULL, 0x6177c90b6abeb5dfULL, 0x9d3afadd40c01d5dULL, 0x98367a57cf4c1bd4ULL, 0xeb798249a2fbb210ULL, 0x2774e9a7809d3abaULL, 0xbf4293f04fd1216eULL, 0x42f8d95d1f217c63ULL, 0x861e5d4cca430fc5ULL, 0xdb39da71aae39238ULL, 0x912aecd342c61557ULL }; static const u64 T5[256] = { 0xb9bb016ad3ba68d2ULL, 0x9ae5b166fc54194dULL, 0x65e2cd14712f93bcULL, 0x8725511b9c74b9cdULL, 0xa2f7a457f5530251ULL, 0xd6d003be68d3b86bULL, 0xded604b56bd2bd6fULL, 0x52b3fe85d74d6429ULL, 0xbafdad4af0500d5dULL, 0x09cf63e0e9ac268aULL, 0x1c0984968a8d830eULL, 0x91a51a4ddcbf79c6ULL, 0xa73d4d379070adddULL, 0xaaf1a35cf6520755ULL, 0xa47be117b39ac852ULL, 0x5ab5f98ed44c612dULL, 0x0346ac2023ea658fULL, 0xe6c4118462d5a673ULL, 0xcc55c268a497f166ULL, 0xc6dc0da86ed1b263ULL, 0x85aa99d05533ffccULL, 0xb2fbaa41f3510859ULL, 0xe2c79c0fed5b2a71ULL, 0x59f355aef7a604a2ULL, 0xbefe20c17fde815fULL, 0x7aade5a2d848753dULL, 0x29d77fcce5a8329aULL, 0xbc71e80ab699c75eULL, 0x96e03be670db904bULL, 0x8dac9edb5632fac8ULL, 0xd1952215c4b751e6ULL, 0xb332ceaa19fc2bd7ULL, 0x4b70937338e348abULL, 0x8463fd3bbf9edc42ULL, 0xfc41d052ae91ef7eULL, 0xac7de61cb09bcd56ULL, 0x437694783be24dafULL, 0xb1bd0661d0bb6dd6ULL, 0x329bdaf1c3415819ULL, 0x577917e5b26ecba5ULL, 0x41f95cb3f2a50baeULL, 0x16804b5640cbc00bULL, 0x7f670cc2bd6bdab1ULL, 0xdc59cc7ea295fb6eULL, 0x61e1409ffea11fbeULL, 0xcb10e3c308f318ebULL, 0xe181302fceb14ffeULL, 0x100c0e1606020a08ULL, 0x2e925e6749ccdb17ULL, 0x6ea2663f51c4f337ULL, 0xe84e53cf271d6974ULL, 0xa0786c9c3c144450ULL, 0x56b0730e58c3e82bULL, 0x3f57349aa563f291ULL, 0x9ee63ced73da954fULL, 0xd2d38e35e75d3469ULL, 0xc2df8023e15f3e61ULL, 
0xaef22ed779dc8b57ULL, 0xcf136e48877d94e9ULL, 0x2694596c4acdde13ULL, 0xdf1f605e817f9ee1ULL, 0xeac19b04ee5a2f75ULL, 0x477519f3b46cc1adULL, 0xdad5893ee45c316dULL, 0xeb08ffef04f70cfbULL, 0x2dd4f2476a26be98ULL, 0xab38c7b71cff24dbULL, 0x3b54b9112aed7e93ULL, 0x134aa23625e86f87ULL, 0x9c69f426ba9dd34eULL, 0x5f7f10eeb16fcea1ULL, 0x04038d8b8f8e8c02ULL, 0xc8564fe32b197d64ULL, 0x69e74794fda01abaULL, 0xd31aeade0df017e7ULL, 0x3c1198ba8689971eULL, 0x78222d69110f333cULL, 0x3812153109071b1cULL, 0x11c56afdecaf2986ULL, 0x8b20db9b10fb30cbULL, 0x4030385818082820ULL, 0xa87e6b973f154154ULL, 0x682e237f170d3934ULL, 0x20181c2c0c041410ULL, 0x0806070b03010504ULL, 0x074521abac64e98dULL, 0xb6f827ca7cdf845bULL, 0x97295f0d9a76b3c5ULL, 0xef0b72648b7980f9ULL, 0xa6f429dc7add8e53ULL, 0xf58eb3b2473dc9f4ULL, 0xb074628a3a164e58ULL, 0xe582bda4413fc3fcULL, 0xa5b285fc5937ebdcULL, 0x4f731ef8b76dc4a9ULL, 0xdd90a8954838d8e0ULL, 0xa1b10877d6b967deULL, 0xbf37442a9573a2d1ULL, 0x1b4ca53d26e96a83ULL, 0xb5be8bea5f35e1d4ULL, 0x92e3b66dff551c49ULL, 0xaf3b4a3c9371a8d9ULL, 0xff077c728d7b8af1ULL, 0x140f839d898c860aULL, 0xb73143219672a7d5ULL, 0x34179fb18588921aULL, 0xe30ef8e407f609ffULL, 0x4dfcd6337e2a82a8ULL, 0xed84baaf423ec6f8ULL, 0xcad98728e25e3b65ULL, 0x25d2f54c6927bb9cULL, 0x0a89cfc0ca464305ULL, 0x60282474140c3c30ULL, 0x0f4326a0af65ec89ULL, 0x676d05dfb868d5bdULL, 0x2f5b3a8ca361f899ULL, 0x180a091d05030f0cULL, 0x46bc7d185ec1e223ULL, 0x82efb87bf9571641ULL, 0xfece189967d6a97fULL, 0x86ec35f076d99a43ULL, 0xfacd9512e858257dULL, 0x8eea32fb75d89f47ULL, 0x17492fbdaa66e385ULL, 0xf6c81f9264d7ac7bULL, 0xcd9ca6834e3ad2e8ULL, 0x0e8a424b45c8cf07ULL, 0xfd88b4b9443cccf0ULL, 0x8326dc9013fa35cfULL, 0xc453c563a796f462ULL, 0x51f552a5f4a701a6ULL, 0xb477ef01b598c25aULL, 0x3352be1a29ec7b97ULL, 0xa9b70f7cd5b862daULL, 0x76a86f2254c7fc3bULL, 0x19c36df6efae2c82ULL, 0x6f6b02d4bb69d0b9ULL, 0x62a7ecbfdd4b7a31ULL, 0x31dd76d1e0ab3d96ULL, 0x21d178c7e6a9379eULL, 0x1f4f28b6a967e681ULL, 0x503c364e1e0a2228ULL, 0x028fc8cbc9474601ULL, 0xc316e4c80bf21defULL, 0xc1992c03c2b55beeULL, 0x0dccee6b6622aa88ULL, 0x7b64814932e556b3ULL, 0x235eb00c2fee719fULL, 0x99a31d46dfbe7cc2ULL, 0x45fad1387d2b87acULL, 0x7c21a0e29e81bf3eULL, 0x906c7ea636125a48ULL, 0x6c2daef49883b536ULL, 0xd85a41f52d1b776cULL, 0x70242a62120e3638ULL, 0x05cae9606523af8cULL, 0xfb04f1f902f506f3ULL, 0x1283c6ddcf454c09ULL, 0x15c6e7766321a584ULL, 0x3e9e50714fced11fULL, 0x72abe2a9db497039ULL, 0x7de8c409742c9cb0ULL, 0x9b2cd58d16f93ac3ULL, 0x636e885437e659bfULL, 0xd993251ec7b654e2ULL, 0x5df0d825782888a0ULL, 0xb872658139174b5cULL, 0x642ba9ff9b82b032ULL, 0xd05c46fe2e1a7268ULL, 0x2c1d96ac808b9d16ULL, 0xa33ec0bc1ffe21dfULL, 0x241b91a7838a9812ULL, 0x48363f531b092d24ULL, 0x068c454046c9ca03ULL, 0x4c35b2d89487a126ULL, 0x4ab9f798d24e6b25ULL, 0x5b7c9d653ee142a3ULL, 0x6de4ca1f722e96b8ULL, 0x7362864231e453b7ULL, 0x537a9a6e3de047a7ULL, 0x0b40ab2b20eb608bULL, 0xf447d759ad90ea7aULL, 0x49ff5bb8f1a40eaaULL, 0xf0445ad2221e6678ULL, 0x5c39bcce9285ab2eULL, 0x275d3d87a060fd9dULL, 0x0000000000000000ULL, 0x35defb5a6f25b194ULL, 0xf302f6f201f403f7ULL, 0xdb1cedd50ef112e3ULL, 0xd45fcb75a194fe6aULL, 0x583a31451d0b272cULL, 0x6b688f5f34e75cbbULL, 0x8f2356109f75bcc9ULL, 0x2b58b7072cef749bULL, 0xbdb88ce15c34e4d0ULL, 0x95a697c65331f5c4ULL, 0xeec2168f61d4a377ULL, 0xceda0aa36dd0b767ULL, 0x4433b5d39786a422ULL, 0xd7196755827e9be5ULL, 0x01c964ebeaad238eULL, 0xbb34c9a11afd2ed3ULL, 0x55f6df2e7b298da4ULL, 0x9da090cd5030f0c0ULL, 0xc59aa1884d3bd7ecULL, 0x8c65fa30bc9fd946ULL, 0x932ad28615f83fc7ULL, 0x7eae682957c6f93fULL, 0x986a79ad35135f4cULL, 0x3014123a0a061e18ULL, 
0x281e1b270f051114ULL, 0x66a4613452c5f633ULL, 0x886677bb33115544ULL, 0x9f2f58069977b6c1ULL, 0xc7156943847c91edULL, 0xf7017b798e7a8ff5ULL, 0xe70d756f887885fdULL, 0xadb482f75a36eed8ULL, 0xe04854c4241c6c70ULL, 0xd596af9e4b39dde4ULL, 0xf2cb9219eb592079ULL, 0xc05048e828187860ULL, 0x8ae9bf70fa561345ULL, 0xf18d3e39c8b345f6ULL, 0xe9873724cdb04afaULL, 0x3dd8fc516c24b490ULL, 0x1dc0e07d6020a080ULL, 0xf98b3932cbb240f2ULL, 0xe44bd94fab92e072ULL, 0x71ed4e89f8a315b6ULL, 0x4eba7a135dc0e727ULL, 0x1a85c1d6cc44490dULL, 0x37513391a662f795ULL, 0x806070b030105040ULL, 0xc99f2b08c1b45eeaULL, 0x543fbbc59184ae2aULL, 0x2297d4e7c5435211ULL, 0xec4dde44a893e576ULL, 0x5eb674055bc2ed2fULL, 0x6aa1ebb4de4a7f35ULL, 0x81a9145bdabd73ceULL, 0x0c058a808c8f8906ULL, 0x75eec302772d99b4ULL, 0x89af1350d9bc76caULL, 0x946ff32db99cd64aULL, 0x77610bc9be6adfb5ULL, 0x3a9dddfac0405d1dULL, 0x3698577a4ccfd41bULL, 0x79eb4982fba210b2ULL, 0x7427a7e99d80ba3aULL, 0x42bff093d14f6e21ULL, 0xf8425dd9211f637cULL, 0x1e864c5d43cac50fULL, 0x39db71dae3aa3892ULL, 0x2a91d3ecc6425715ULL }; static const u64 T6[256] = { 0x6a01bbb9d268bad3ULL, 0x66b1e59a4d1954fcULL, 0x14cde265bc932f71ULL, 0x1b512587cdb9749cULL, 0x57a4f7a2510253f5ULL, 0xbe03d0d66bb8d368ULL, 0xb504d6de6fbdd26bULL, 0x85feb35229644dd7ULL, 0x4aadfdba5d0d50f0ULL, 0xe063cf098a26ace9ULL, 0x9684091c0e838d8aULL, 0x4d1aa591c679bfdcULL, 0x374d3da7ddad7090ULL, 0x5ca3f1aa550752f6ULL, 0x17e17ba452c89ab3ULL, 0x8ef9b55a2d614cd4ULL, 0x20ac46038f65ea23ULL, 0x8411c4e673a6d562ULL, 0x68c255cc66f197a4ULL, 0xa80ddcc663b2d16eULL, 0xd099aa85ccff3355ULL, 0x41aafbb2590851f3ULL, 0x0f9cc7e2712a5bedULL, 0xae55f359a204a6f7ULL, 0xc120febe5f81de7fULL, 0xa2e5ad7a3d7548d8ULL, 0xcc7fd7299a32a8e5ULL, 0x0ae871bc5ec799b6ULL, 0xe63be0964b90db70ULL, 0xdb9eac8dc8fa3256ULL, 0x152295d1e651b7c4ULL, 0xaace32b3d72bfc19ULL, 0x7393704bab48e338ULL, 0x3bfd638442dc9ebfULL, 0x52d041fc7eef91aeULL, 0x1ce67dac56cd9bb0ULL, 0x78947643af4de23bULL, 0x6106bdb1d66dbbd0ULL, 0xf1da9b32195841c3ULL, 0xe5177957a5cb6eb2ULL, 0xb35cf941ae0ba5f2ULL, 0x564b80160bc0cb40ULL, 0xc20c677fb1da6bbdULL, 0x7ecc59dc6efb95a2ULL, 0x9f40e161be1fa1feULL, 0xc3e310cbeb18f308ULL, 0x2f3081e1fe4fb1ceULL, 0x160e0c10080a0206ULL, 0x675e922e17dbcc49ULL, 0x3f66a26e37f3c451ULL, 0xcf534ee874691d27ULL, 0x9c6c78a05044143cULL, 0x0e73b0562be8c358ULL, 0x9a34573f91f263a5ULL, 0xed3ce69e4f95da73ULL, 0x358ed3d269345de7ULL, 0x2380dfc2613e5fe1ULL, 0xd72ef2ae578bdc79ULL, 0x486e13cfe9947d87ULL, 0x6c59942613decd4aULL, 0x5e601fdfe19e7f81ULL, 0x049bc1ea752f5aeeULL, 0xf3197547adc16cb4ULL, 0x3e89d5da6d315ce4ULL, 0xefff08ebfb0cf704ULL, 0x47f2d42d98be266aULL, 0xb7c738abdb24ff1cULL, 0x11b9543b937eed2aULL, 0x36a24a13876fe825ULL, 0x26f4699c4ed39dbaULL, 0xee107f5fa1ce6fb1ULL, 0x8b8d0304028c8e8fULL, 0xe34f56c8647d192bULL, 0x9447e769ba1aa0fdULL, 0xdeea1ad3e717f00dULL, 0xba98113c1e978986ULL, 0x692d22783c330f11ULL, 0x311512381c1b0709ULL, 0xfd6ac5118629afecULL, 0x9bdb208bcb30fb10ULL, 0x5838304020280818ULL, 0x976b7ea85441153fULL, 0x7f232e6834390d17ULL, 0x2c1c18201014040cULL, 0x0b07060804050103ULL, 0xab2145078de964acULL, 0xca27f8b65b84df7cULL, 0x0d5f2997c5b3769aULL, 0x64720beff980798bULL, 0xdc29f4a6538edd7aULL, 0xb2b38ef5f4c93d47ULL, 0x8a6274b0584e163aULL, 0xa4bd82e5fcc33f41ULL, 0xfc85b2a5dceb3759ULL, 0xf81e734fa9c46db7ULL, 0x95a890dde0d83848ULL, 0x7708b1a1de67b9d6ULL, 0x2a4437bfd1a27395ULL, 0x3da54c1b836ae926ULL, 0xea8bbeb5d4e1355fULL, 0x6db6e392491c55ffULL, 0x3c4a3bafd9a87193ULL, 0x727c07fff18a7b8dULL, 0x9d830f140a868c89ULL, 0x214331b7d5a77296ULL, 0xb19f17341a928885ULL, 0xe4f80ee3ff09f607ULL, 0x33d6fc4da8822a7eULL, 
0xafba84edf8c63e42ULL, 0x2887d9ca653b5ee2ULL, 0x4cf5d2259cbb2769ULL, 0xc0cf890a054346caULL, 0x74242860303c0c14ULL, 0xa026430f89ec65afULL, 0xdf056d67bdd568b8ULL, 0x8c3a5b2f99f861a3ULL, 0x1d090a180c0f0305ULL, 0x187dbc4623e2c15eULL, 0x7bb8ef82411657f9ULL, 0x9918cefe7fa9d667ULL, 0xf035ec86439ad976ULL, 0x1295cdfa7d2558e8ULL, 0xfb32ea8e479fd875ULL, 0xbd2f491785e366aaULL, 0x921fc8f67bacd764ULL, 0x83a69ccde8d23a4eULL, 0x4b428a0e07cfc845ULL, 0xb9b488fdf0cc3c44ULL, 0x90dc2683cf35fa13ULL, 0x63c553c462f496a7ULL, 0xa552f551a601a7f4ULL, 0x01ef77b45ac298b5ULL, 0x1abe5233977bec29ULL, 0x7c0fb7a9da62b8d5ULL, 0x226fa8763bfcc754ULL, 0xf66dc319822caeefULL, 0xd4026b6fb9d069bbULL, 0xbfeca762317a4bddULL, 0xd176dd31963dabe0ULL, 0xc778d1219e37a9e6ULL, 0xb6284f1f81e667a9ULL, 0x4e363c5028220a1eULL, 0xcbc88f02014647c9ULL, 0xc8e416c3ef1df20bULL, 0x032c99c1ee5bb5c2ULL, 0x6beecc0d88aa2266ULL, 0x4981647bb356e532ULL, 0x0cb05e239f71ee2fULL, 0x461da399c27cbedfULL, 0x38d1fa45ac872b7dULL, 0xe2a0217c3ebf819eULL, 0xa67e6c90485a1236ULL, 0xf4ae2d6c36b58398ULL, 0xf5415ad86c771b2dULL, 0x622a247038360e12ULL, 0x60e9ca058caf2365ULL, 0xf9f104fbf306f502ULL, 0xddc68312094c45cfULL, 0x76e7c61584a52163ULL, 0x71509e3e1fd1ce4fULL, 0xa9e2ab72397049dbULL, 0x09c4e87db09c2c74ULL, 0x8dd52c9bc33af916ULL, 0x54886e63bf59e637ULL, 0x1e2593d9e254b6c7ULL, 0x25d8f05da0882878ULL, 0x816572b85c4b1739ULL, 0xffa92b6432b0829bULL, 0xfe465cd068721a2eULL, 0xac961d2c169d8b80ULL, 0xbcc03ea3df21fe1fULL, 0xa7911b2412988a83ULL, 0x533f3648242d091bULL, 0x40458c0603cac946ULL, 0xd8b2354c26a18794ULL, 0x98f7b94a256b4ed2ULL, 0x659d7c5ba342e13eULL, 0x1fcae46db8962e72ULL, 0x42866273b753e431ULL, 0x6e9a7a53a747e03dULL, 0x2bab400b8b60eb20ULL, 0x59d747f47aea90adULL, 0xb85bff49aa0ea4f1ULL, 0xd25a44f078661e22ULL, 0xcebc395c2eab8592ULL, 0x873d5d279dfd60a0ULL, 0x0000000000000000ULL, 0x5afbde3594b1256fULL, 0xf2f602f3f703f401ULL, 0xd5ed1cdbe312f10eULL, 0x75cb5fd46afe94a1ULL, 0x45313a582c270b1dULL, 0x5f8f686bbb5ce734ULL, 0x1056238fc9bc759fULL, 0x07b7582b9b74ef2cULL, 0xe18cb8bdd0e4345cULL, 0xc697a695c4f53153ULL, 0x8f16c2ee77a3d461ULL, 0xa30adace67b7d06dULL, 0xd3b5334422a48697ULL, 0x556719d7e59b7e82ULL, 0xeb64c9018e23adeaULL, 0xa1c934bbd32efd1aULL, 0x2edff655a48d297bULL, 0xcd90a09dc0f03050ULL, 0x88a19ac5ecd73b4dULL, 0x30fa658c46d99fbcULL, 0x86d22a93c73ff815ULL, 0x2968ae7e3ff9c657ULL, 0xad796a984c5f1335ULL, 0x3a121430181e060aULL, 0x271b1e281411050fULL, 0x3461a46633f6c552ULL, 0xbb77668844551133ULL, 0x06582f9fc1b67799ULL, 0x436915c7ed917c84ULL, 0x797b01f7f58f7a8eULL, 0x6f750de7fd857888ULL, 0xf782b4add8ee365aULL, 0xc45448e0706c1c24ULL, 0x9eaf96d5e4dd394bULL, 0x1992cbf2792059ebULL, 0xe84850c060781828ULL, 0x70bfe98a451356faULL, 0x393e8df1f645b3c8ULL, 0x243787e9fa4ab0cdULL, 0x51fcd83d90b4246cULL, 0x7de0c01d80a02060ULL, 0x32398bf9f240b2cbULL, 0x4fd94be472e092abULL, 0x894eed71b615a3f8ULL, 0x137aba4e27e7c05dULL, 0xd6c1851a0d4944ccULL, 0x9133513795f762a6ULL, 0xb070608040501030ULL, 0x082b9fc9ea5eb4c1ULL, 0xc5bb3f542aae8491ULL, 0xe7d49722115243c5ULL, 0x44de4dec76e593a8ULL, 0x0574b65e2fedc25bULL, 0xb4eba16a357f4adeULL, 0x5b14a981ce73bddaULL, 0x808a050c06898f8cULL, 0x02c3ee75b4992d77ULL, 0x5013af89ca76bcd9ULL, 0x2df36f944ad69cb9ULL, 0xc90b6177b5df6abeULL, 0xfadd9d3a1d5d40c0ULL, 0x7a5798361bd4cf4cULL, 0x8249eb79b210a2fbULL, 0xe9a727743aba809dULL, 0x93f0bf42216e4fd1ULL, 0xd95d42f87c631f21ULL, 0x5d4c861e0fc5ca43ULL, 0xda71db399238aae3ULL, 0xecd3912a155742c6ULL }; static const u64 T7[256] = { 0x016ab9bb68d2d3baULL, 0xb1669ae5194dfc54ULL, 0xcd1465e293bc712fULL, 0x511b8725b9cd9c74ULL, 0xa457a2f70251f553ULL, 
0x03bed6d0b86b68d3ULL, 0x04b5ded6bd6f6bd2ULL, 0xfe8552b36429d74dULL, 0xad4abafd0d5df050ULL, 0x63e009cf268ae9acULL, 0x84961c09830e8a8dULL, 0x1a4d91a579c6dcbfULL, 0x4d37a73daddd9070ULL, 0xa35caaf10755f652ULL, 0xe117a47bc852b39aULL, 0xf98e5ab5612dd44cULL, 0xac200346658f23eaULL, 0x1184e6c4a67362d5ULL, 0xc268cc55f166a497ULL, 0x0da8c6dcb2636ed1ULL, 0x99d085aaffcc5533ULL, 0xaa41b2fb0859f351ULL, 0x9c0fe2c72a71ed5bULL, 0x55ae59f304a2f7a6ULL, 0x20c1befe815f7fdeULL, 0xe5a27aad753dd848ULL, 0x7fcc29d7329ae5a8ULL, 0xe80abc71c75eb699ULL, 0x3be696e0904b70dbULL, 0x9edb8dacfac85632ULL, 0x2215d19551e6c4b7ULL, 0xceaab3322bd719fcULL, 0x93734b7048ab38e3ULL, 0xfd3b8463dc42bf9eULL, 0xd052fc41ef7eae91ULL, 0xe61cac7dcd56b09bULL, 0x947843764daf3be2ULL, 0x0661b1bd6dd6d0bbULL, 0xdaf1329b5819c341ULL, 0x17e55779cba5b26eULL, 0x5cb341f90baef2a5ULL, 0x4b561680c00b40cbULL, 0x0cc27f67dab1bd6bULL, 0xcc7edc59fb6ea295ULL, 0x409f61e11fbefea1ULL, 0xe3c3cb1018eb08f3ULL, 0x302fe1814ffeceb1ULL, 0x0e16100c0a080602ULL, 0x5e672e92db1749ccULL, 0x663f6ea2f33751c4ULL, 0x53cfe84e6974271dULL, 0x6c9ca07844503c14ULL, 0x730e56b0e82b58c3ULL, 0x349a3f57f291a563ULL, 0x3ced9ee6954f73daULL, 0x8e35d2d33469e75dULL, 0x8023c2df3e61e15fULL, 0x2ed7aef28b5779dcULL, 0x6e48cf1394e9877dULL, 0x596c2694de134acdULL, 0x605edf1f9ee1817fULL, 0x9b04eac12f75ee5aULL, 0x19f34775c1adb46cULL, 0x893edad5316de45cULL, 0xffefeb080cfb04f7ULL, 0xf2472dd4be986a26ULL, 0xc7b7ab3824db1cffULL, 0xb9113b547e932aedULL, 0xa236134a6f8725e8ULL, 0xf4269c69d34eba9dULL, 0x10ee5f7fcea1b16fULL, 0x8d8b04038c028f8eULL, 0x4fe3c8567d642b19ULL, 0x479469e71abafda0ULL, 0xeaded31a17e70df0ULL, 0x98ba3c11971e8689ULL, 0x2d697822333c110fULL, 0x153138121b1c0907ULL, 0x6afd11c52986ecafULL, 0xdb9b8b2030cb10fbULL, 0x3858403028201808ULL, 0x6b97a87e41543f15ULL, 0x237f682e3934170dULL, 0x1c2c201814100c04ULL, 0x070b080605040301ULL, 0x21ab0745e98dac64ULL, 0x27cab6f8845b7cdfULL, 0x5f0d9729b3c59a76ULL, 0x7264ef0b80f98b79ULL, 0x29dca6f48e537addULL, 0xb3b2f58ec9f4473dULL, 0x628ab0744e583a16ULL, 0xbda4e582c3fc413fULL, 0x85fca5b2ebdc5937ULL, 0x1ef84f73c4a9b76dULL, 0xa895dd90d8e04838ULL, 0x0877a1b167ded6b9ULL, 0x442abf37a2d19573ULL, 0xa53d1b4c6a8326e9ULL, 0x8beab5bee1d45f35ULL, 0xb66d92e31c49ff55ULL, 0x4a3caf3ba8d99371ULL, 0x7c72ff078af18d7bULL, 0x839d140f860a898cULL, 0x4321b731a7d59672ULL, 0x9fb13417921a8588ULL, 0xf8e4e30e09ff07f6ULL, 0xd6334dfc82a87e2aULL, 0xbaafed84c6f8423eULL, 0x8728cad93b65e25eULL, 0xf54c25d2bb9c6927ULL, 0xcfc00a894305ca46ULL, 0x247460283c30140cULL, 0x26a00f43ec89af65ULL, 0x05df676dd5bdb868ULL, 0x3a8c2f5bf899a361ULL, 0x091d180a0f0c0503ULL, 0x7d1846bce2235ec1ULL, 0xb87b82ef1641f957ULL, 0x1899fecea97f67d6ULL, 0x35f086ec9a4376d9ULL, 0x9512facd257de858ULL, 0x32fb8eea9f4775d8ULL, 0x2fbd1749e385aa66ULL, 0x1f92f6c8ac7b64d7ULL, 0xa683cd9cd2e84e3aULL, 0x424b0e8acf0745c8ULL, 0xb4b9fd88ccf0443cULL, 0xdc90832635cf13faULL, 0xc563c453f462a796ULL, 0x52a551f501a6f4a7ULL, 0xef01b477c25ab598ULL, 0xbe1a33527b9729ecULL, 0x0f7ca9b762dad5b8ULL, 0x6f2276a8fc3b54c7ULL, 0x6df619c32c82efaeULL, 0x02d46f6bd0b9bb69ULL, 0xecbf62a77a31dd4bULL, 0x76d131dd3d96e0abULL, 0x78c721d1379ee6a9ULL, 0x28b61f4fe681a967ULL, 0x364e503c22281e0aULL, 0xc8cb028f4601c947ULL, 0xe4c8c3161def0bf2ULL, 0x2c03c1995beec2b5ULL, 0xee6b0dccaa886622ULL, 0x81497b6456b332e5ULL, 0xb00c235e719f2feeULL, 0x1d4699a37cc2dfbeULL, 0xd13845fa87ac7d2bULL, 0xa0e27c21bf3e9e81ULL, 0x7ea6906c5a483612ULL, 0xaef46c2db5369883ULL, 0x41f5d85a776c2d1bULL, 0x2a6270243638120eULL, 0xe96005caaf8c6523ULL, 0xf1f9fb0406f302f5ULL, 0xc6dd12834c09cf45ULL, 0xe77615c6a5846321ULL, 
0x50713e9ed11f4fceULL, 0xe2a972ab7039db49ULL, 0xc4097de89cb0742cULL, 0xd58d9b2c3ac316f9ULL, 0x8854636e59bf37e6ULL, 0x251ed99354e2c7b6ULL, 0xd8255df088a07828ULL, 0x6581b8724b5c3917ULL, 0xa9ff642bb0329b82ULL, 0x46fed05c72682e1aULL, 0x96ac2c1d9d16808bULL, 0xc0bca33e21df1ffeULL, 0x91a7241b9812838aULL, 0x3f5348362d241b09ULL, 0x4540068cca0346c9ULL, 0xb2d84c35a1269487ULL, 0xf7984ab96b25d24eULL, 0x9d655b7c42a33ee1ULL, 0xca1f6de496b8722eULL, 0x8642736253b731e4ULL, 0x9a6e537a47a73de0ULL, 0xab2b0b40608b20ebULL, 0xd759f447ea7aad90ULL, 0x5bb849ff0eaaf1a4ULL, 0x5ad2f0446678221eULL, 0xbcce5c39ab2e9285ULL, 0x3d87275dfd9da060ULL, 0x0000000000000000ULL, 0xfb5a35deb1946f25ULL, 0xf6f2f30203f701f4ULL, 0xedd5db1c12e30ef1ULL, 0xcb75d45ffe6aa194ULL, 0x3145583a272c1d0bULL, 0x8f5f6b685cbb34e7ULL, 0x56108f23bcc99f75ULL, 0xb7072b58749b2cefULL, 0x8ce1bdb8e4d05c34ULL, 0x97c695a6f5c45331ULL, 0x168feec2a37761d4ULL, 0x0aa3cedab7676dd0ULL, 0xb5d34433a4229786ULL, 0x6755d7199be5827eULL, 0x64eb01c9238eeaadULL, 0xc9a1bb342ed31afdULL, 0xdf2e55f68da47b29ULL, 0x90cd9da0f0c05030ULL, 0xa188c59ad7ec4d3bULL, 0xfa308c65d946bc9fULL, 0xd286932a3fc715f8ULL, 0x68297eaef93f57c6ULL, 0x79ad986a5f4c3513ULL, 0x123a30141e180a06ULL, 0x1b27281e11140f05ULL, 0x613466a4f63352c5ULL, 0x77bb886655443311ULL, 0x58069f2fb6c19977ULL, 0x6943c71591ed847cULL, 0x7b79f7018ff58e7aULL, 0x756fe70d85fd8878ULL, 0x82f7adb4eed85a36ULL, 0x54c4e0486c70241cULL, 0xaf9ed596dde44b39ULL, 0x9219f2cb2079eb59ULL, 0x48e8c05078602818ULL, 0xbf708ae91345fa56ULL, 0x3e39f18d45f6c8b3ULL, 0x3724e9874afacdb0ULL, 0xfc513dd8b4906c24ULL, 0xe07d1dc0a0806020ULL, 0x3932f98b40f2cbb2ULL, 0xd94fe44be072ab92ULL, 0x4e8971ed15b6f8a3ULL, 0x7a134ebae7275dc0ULL, 0xc1d61a85490dcc44ULL, 0x33913751f795a662ULL, 0x70b0806050403010ULL, 0x2b08c99f5eeac1b4ULL, 0xbbc5543fae2a9184ULL, 0xd4e722975211c543ULL, 0xde44ec4de576a893ULL, 0x74055eb6ed2f5bc2ULL, 0xebb46aa17f35de4aULL, 0x145b81a973cedabdULL, 0x8a800c0589068c8fULL, 0xc30275ee99b4772dULL, 0x135089af76cad9bcULL, 0xf32d946fd64ab99cULL, 0x0bc97761dfb5be6aULL, 0xddfa3a9d5d1dc040ULL, 0x577a3698d41b4ccfULL, 0x498279eb10b2fba2ULL, 0xa7e97427ba3a9d80ULL, 0xf09342bf6e21d14fULL, 0x5dd9f842637c211fULL, 0x4c5d1e86c50f43caULL, 0x71da39db3892e3aaULL, 0xd3ec2a915715c642ULL }; static const u64 c[KHAZAD_ROUNDS + 1] = { 0xba542f7453d3d24dULL, 0x50ac8dbf70529a4cULL, 0xead597d133515ba6ULL, 0xde48a899db32b7fcULL, 0xe39e919be2bb416eULL, 0xa5cb6b95a1f3b102ULL, 0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL }; static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; int r; const u64 *S = T7; u64 K2, K1; /* key is supposed to be 32-bit aligned */ K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]); /* setup the encrypt key */ for (r = 0; r <= KHAZAD_ROUNDS; r++) { ctx->E[r] = T0[(int)(K1 >> 56) ] ^ T1[(int)(K1 >> 48) & 0xff] ^ T2[(int)(K1 >> 40) & 0xff] ^ T3[(int)(K1 >> 32) & 0xff] ^ T4[(int)(K1 >> 24) & 0xff] ^ T5[(int)(K1 >> 16) & 0xff] ^ T6[(int)(K1 >> 8) & 0xff] ^ T7[(int)(K1 ) & 0xff] ^ c[r] ^ K2; K2 = K1; K1 = ctx->E[r]; } /* Setup the decrypt key */ ctx->D[0] = ctx->E[KHAZAD_ROUNDS]; for (r = 1; r < KHAZAD_ROUNDS; r++) { K1 = ctx->E[KHAZAD_ROUNDS - r]; ctx->D[r] = T0[(int)S[(int)(K1 >> 56) ] & 0xff] ^ T1[(int)S[(int)(K1 >> 48) & 0xff] & 0xff] ^ T2[(int)S[(int)(K1 >> 40) & 0xff] & 0xff] ^ T3[(int)S[(int)(K1 >> 32) & 0xff] & 0xff] ^ T4[(int)S[(int)(K1 >> 24) & 0xff] 
				& 0xff] ^
			T5[(int)S[(int)(K1 >> 16) & 0xff] & 0xff] ^
			T6[(int)S[(int)(K1 >> 8) & 0xff] & 0xff] ^
			T7[(int)S[(int)(K1 ) & 0xff] & 0xff];
	}
	ctx->D[KHAZAD_ROUNDS] = ctx->E[0];

	return 0;
}

static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
		u8 *ciphertext, const u8 *plaintext)
{
	const __be64 *src = (const __be64 *)plaintext;
	__be64 *dst = (__be64 *)ciphertext;
	int r;
	u64 state;

	state = be64_to_cpu(*src) ^ roundKey[0];

	for (r = 1; r < KHAZAD_ROUNDS; r++) {
		state = T0[(int)(state >> 56) ] ^
			T1[(int)(state >> 48) & 0xff] ^
			T2[(int)(state >> 40) & 0xff] ^
			T3[(int)(state >> 32) & 0xff] ^
			T4[(int)(state >> 24) & 0xff] ^
			T5[(int)(state >> 16) & 0xff] ^
			T6[(int)(state >> 8) & 0xff] ^
			T7[(int)(state ) & 0xff] ^
			roundKey[r];
	}

	state = (T0[(int)(state >> 56) ] & 0xff00000000000000ULL) ^
		(T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^
		(T2[(int)(state >> 40) & 0xff] & 0x0000ff0000000000ULL) ^
		(T3[(int)(state >> 32) & 0xff] & 0x000000ff00000000ULL) ^
		(T4[(int)(state >> 24) & 0xff] & 0x00000000ff000000ULL) ^
		(T5[(int)(state >> 16) & 0xff] & 0x0000000000ff0000ULL) ^
		(T6[(int)(state >> 8) & 0xff] & 0x000000000000ff00ULL) ^
		(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
		roundKey[KHAZAD_ROUNDS];

	*dst = cpu_to_be64(state);
}

static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
	khazad_crypt(ctx->E, dst, src);
}

static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
	khazad_crypt(ctx->D, dst, src);
}

static struct crypto_alg khazad_alg = {
	.cra_name		=	"khazad",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	KHAZAD_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof (struct khazad_ctx),
	.cra_alignmask		=	7,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(khazad_alg.cra_list),
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	KHAZAD_KEY_SIZE,
	.cia_max_keysize	=	KHAZAD_KEY_SIZE,
	.cia_setkey		=	khazad_setkey,
	.cia_encrypt		=	khazad_encrypt,
	.cia_decrypt		=	khazad_decrypt } }
};

static int __init khazad_mod_init(void)
{
	int ret = 0;

	ret = crypto_register_alg(&khazad_alg);
	return ret;
}

static void __exit khazad_mod_fini(void)
{
	crypto_unregister_alg(&khazad_alg);
}

module_init(khazad_mod_init);
module_exit(khazad_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
gpl-2.0
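For context on how a cipher registered this way is consumed, the fragment below sketches a caller driving the "khazad" algorithm through the kernel's single-block cipher interface (crypto_alloc_cipher() and friends from linux/crypto.h). It is a minimal illustration and not part of the file above: the function name, key bytes, and plaintext are placeholders, and a real caller should also respect crypto_cipher_alignmask(), since the algorithm declares cra_alignmask = 7.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Hypothetical round-trip check: encrypt one block, decrypt it, compare. */
static int khazad_roundtrip_sketch(void)
{
	u8 key[16] = { 0 };			/* KHAZAD_KEY_SIZE bytes, all-zero placeholder */
	u8 pt[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };	/* one KHAZAD_BLOCK_SIZE block */
	u8 ct[8], out[8];
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("khazad", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto free_tfm;

	/* Single-block helpers call cia_encrypt/cia_decrypt registered above. */
	crypto_cipher_encrypt_one(tfm, ct, pt);
	crypto_cipher_decrypt_one(tfm, out, ct);

	ret = memcmp(out, pt, sizeof(pt)) ? -EINVAL : 0;
free_tfm:
	crypto_free_cipher(tfm);
	return ret;
}

A round trip over a single 8-byte block like this exercises both key schedules (the E and D arrays) built in khazad_setkey().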
InfinitiveOS-Devices/android_kernel_cyanogen_msm8916
drivers/iommu/msm_iommu-v1.c
251
37188
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msm-bus.h> #include <linux/platform_device.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/iommu.h> #include <linux/msm-bus.h> #include <linux/clk.h> #include <linux/scatterlist.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/regulator/consumer.h> #include <linux/qcom_iommu.h> #include <asm/sizes.h> #include "msm_iommu_hw-v1.h" #include "msm_iommu_priv.h" #include "msm_iommu_perfmon.h" #include "msm_iommu_pagetable.h" #ifdef CONFIG_IOMMU_LPAE /* bitmap of the page sizes currently supported */ #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_2M | SZ_32M | SZ_1G) #else /* bitmap of the page sizes currently supported */ #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) #endif #define IOMMU_MSEC_STEP 10 #define IOMMU_MSEC_TIMEOUT 5000 static DEFINE_MUTEX(msm_iommu_lock); struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS]; static int __enable_regulators(struct msm_iommu_drvdata *drvdata) { int ret = 0; if (drvdata->gdsc) { ret = regulator_enable(drvdata->gdsc); if (ret) goto fail; if (drvdata->alt_gdsc) ret = regulator_enable(drvdata->alt_gdsc); if (ret) { regulator_disable(drvdata->gdsc); goto fail; } } ++drvdata->powered_on; fail: return ret; } static void __disable_regulators(struct msm_iommu_drvdata *drvdata) { if (drvdata->alt_gdsc) regulator_disable(drvdata->alt_gdsc); if (drvdata->gdsc) regulator_disable(drvdata->gdsc); --drvdata->powered_on; } static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote) { int ret = 0; if (drvdata->bus_client) { ret = msm_bus_scale_client_update_request(drvdata->bus_client, vote); if (ret) pr_err("%s: Failed to vote for bus: %d\n", __func__, vote); } return ret; } static int __enable_clocks(struct msm_iommu_drvdata *drvdata) { int ret; ret = clk_prepare_enable(drvdata->pclk); if (ret) goto fail; ret = clk_prepare_enable(drvdata->clk); if (ret) goto fail1; if (drvdata->aclk) { ret = clk_prepare_enable(drvdata->aclk); if (ret) goto fail2; } if (drvdata->aiclk) { ret = clk_prepare_enable(drvdata->aiclk); if (ret) goto fail3; } if (drvdata->clk_reg_virt) { unsigned int value; value = readl_relaxed(drvdata->clk_reg_virt); value &= ~0x1; writel_relaxed(value, drvdata->clk_reg_virt); /* Ensure clock is on before continuing */ mb(); } return 0; fail3: if (drvdata->aclk) clk_disable_unprepare(drvdata->aclk); fail2: clk_disable_unprepare(drvdata->clk); fail1: clk_disable_unprepare(drvdata->pclk); fail: return ret; } static void __disable_clocks(struct msm_iommu_drvdata *drvdata) { if (drvdata->aiclk) clk_disable_unprepare(drvdata->aiclk); if (drvdata->aclk) clk_disable_unprepare(drvdata->aclk); clk_disable_unprepare(drvdata->clk); clk_disable_unprepare(drvdata->pclk); } static void _iommu_lock_acquire(unsigned int 
need_extra_lock) { mutex_lock(&msm_iommu_lock); } static void _iommu_lock_release(unsigned int need_extra_lock) { mutex_unlock(&msm_iommu_lock); } struct iommu_access_ops iommu_access_ops_v1 = { .iommu_power_on = __enable_regulators, .iommu_power_off = __disable_regulators, .iommu_bus_vote = apply_bus_vote, .iommu_clk_on = __enable_clocks, .iommu_clk_off = __disable_clocks, .iommu_lock_acquire = _iommu_lock_acquire, .iommu_lock_release = _iommu_lock_release, }; #ifdef CONFIG_MSM_IOMMU_VBIF_CHECK #define VBIF_XIN_HALT_CTRL0 0x200 #define VBIF_XIN_HALT_CTRL1 0x204 #define VBIF_AXI_HALT_CTRL0 0x208 #define VBIF_AXI_HALT_CTRL1 0x20C static void __halt_vbif_xin(void __iomem *vbif_base) { pr_err("Halting VBIF_XIN\n"); writel_relaxed(0xFFFFFFFF, vbif_base + VBIF_XIN_HALT_CTRL0); } static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base) { unsigned int reg_val; reg_val = readl_relaxed(base + MICRO_MMU_CTRL); pr_err("Value of SMMU_IMPLDEF_MICRO_MMU_CTRL = 0x%x\n", reg_val); reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL0); pr_err("Value of VBIF_XIN_HALT_CTRL0 = 0x%x\n", reg_val); reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL1); pr_err("Value of VBIF_XIN_HALT_CTRL1 = 0x%x\n", reg_val); reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL0); pr_err("Value of VBIF_AXI_HALT_CTRL0 = 0x%x\n", reg_val); reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL1); pr_err("Value of VBIF_AXI_HALT_CTRL1 = 0x%x\n", reg_val); } static int __check_vbif_state(struct msm_iommu_drvdata const *drvdata) { phys_addr_t addr = (phys_addr_t) (drvdata->phys_base - (phys_addr_t) 0x4000); void __iomem *base = ioremap(addr, 0x1000); int ret = 0; if (base) { __dump_vbif_state(drvdata->base, base); __halt_vbif_xin(drvdata->base); __dump_vbif_state(drvdata->base, base); iounmap(base); } else { pr_err("%s: Unable to ioremap\n", __func__); ret = -ENOMEM; } return ret; } static void check_halt_state(struct msm_iommu_drvdata const *drvdata) { int res; unsigned int val; void __iomem *base = drvdata->base; char const *name = drvdata->name; pr_err("Timed out waiting for IOMMU halt to complete for %s\n", name); res = __check_vbif_state(drvdata); if (res) BUG(); pr_err("Checking if IOMMU halt completed for %s\n", name); res = readl_tight_poll_timeout( GLB_REG(MICRO_MMU_CTRL, base), val, (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 5000000); if (res) { pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n", name); } else { pr_err("IOMMU halt completed. VBIF FIFO most likely not getting drained by master\n"); } BUG(); } static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata, int ctx) { int res; unsigned int val; void __iomem *base = drvdata->base; char const *name = drvdata->name; pr_err("Timed out waiting for TLB SYNC to complete for %s\n", name); res = __check_vbif_state(drvdata); if (res) BUG(); pr_err("Checking if TLB sync completed for %s\n", name); res = readl_tight_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val, (val & CB_TLBSTATUS_SACTIVE) == 0, 5000000); if (res) { pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n", name); } else { pr_err("TLB Sync completed. 
VBIF FIFO most likely not getting drained by master\n"); } BUG(); } #else /* * For targets without VBIF or for targets with the VBIF check disabled * we directly just crash to capture the issue */ static void check_halt_state(struct msm_iommu_drvdata const *drvdata) { BUG(); } static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata, int ctx) { BUG(); } #endif void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata) { if (iommu_drvdata->halt_enabled) { unsigned int val; void __iomem *base = iommu_drvdata->base; int res; SET_MICRO_MMU_CTRL_HALT_REQ(base, 1); res = readl_tight_poll_timeout( GLB_REG(MICRO_MMU_CTRL, base), val, (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 5000000); if (res) check_halt_state(iommu_drvdata); /* Ensure device is idle before continuing */ mb(); } } void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata) { if (iommu_drvdata->halt_enabled) { /* * Ensure transactions have completed before releasing * the halt */ mb(); SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0); /* * Ensure write is complete before continuing to ensure * we don't turn off clocks while transaction is still * pending. */ mb(); } } static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx) { unsigned int val; unsigned int res; void __iomem *base = iommu_drvdata->cb_base; SET_TLBSYNC(base, ctx, 0); /* No barrier needed due to read dependency */ res = readl_tight_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val, (val & CB_TLBSTATUS_SACTIVE) == 0, 5000000); if (res) check_tlb_sync_state(iommu_drvdata, ctx); } static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va) { struct msm_iommu_priv *priv = domain->priv; struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; int ret = 0; list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent); iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); BUG_ON(!iommu_drvdata); ret = __enable_clocks(iommu_drvdata); if (ret) goto fail; SET_TLBIVA(iommu_drvdata->cb_base, ctx_drvdata->num, ctx_drvdata->asid | (va & CB_TLBIVA_VA)); mb(); __sync_tlb(iommu_drvdata, ctx_drvdata->num); __disable_clocks(iommu_drvdata); } fail: return ret; } static int __flush_iotlb(struct iommu_domain *domain) { struct msm_iommu_priv *priv = domain->priv; struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; int ret = 0; list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent); iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); BUG_ON(!iommu_drvdata); ret = __enable_clocks(iommu_drvdata); if (ret) goto fail; SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num, ctx_drvdata->asid); __sync_tlb(iommu_drvdata, ctx_drvdata->num); __disable_clocks(iommu_drvdata); } fail: return ret; } /* * May only be called for non-secure iommus */ static void __reset_iommu(struct msm_iommu_drvdata *iommu_drvdata) { int i, smt_size, res; unsigned long val; void __iomem *base = iommu_drvdata->base; /* SMMU_ACR is an implementation defined register. * Resetting is not required for some implementation. 
*/ if (iommu_drvdata->model != MMU_500) SET_ACR(base, 0); SET_CR2(base, 0); SET_GFAR(base, 0); SET_GFSRRESTORE(base, 0); /* Invalidate the entire non-secure TLB */ SET_TLBIALLNSNH(base, 0); SET_TLBGSYNC(base, 0); res = readl_tight_poll_timeout(GLB_REG(TLBGSTATUS, base), val, (val & TLBGSTATUS_GSACTIVE) == 0, 5000000); if (res) BUG(); smt_size = GET_IDR0_NUMSMRG(base); for (i = 0; i < smt_size; i++) SET_SMR_VALID(base, i, 0); mb(); } static void __reset_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata) { void __iomem *base = iommu_drvdata->base; if (iommu_drvdata->model != MMU_500) SET_NSACR(base, 0); SET_NSCR2(base, 0); SET_NSGFAR(base, 0); SET_NSGFSRRESTORE(base, 0); mb(); } static void __program_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata) { void __iomem *base = iommu_drvdata->base; if (iommu_drvdata->model == MMU_500) { SET_NSACR_SMTNMC_BPTLBEN(base, 1); SET_NSACR_MMUDIS_BPTLBEN(base, 1); SET_NSACR_S2CR_BPTLBEN(base, 1); } SET_NSCR0_SMCFCFG(base, 1); SET_NSCR0_USFCFG(base, 1); SET_NSCR0_STALLD(base, 1); SET_NSCR0_GCFGFIE(base, 1); SET_NSCR0_GCFGFRE(base, 1); SET_NSCR0_GFIE(base, 1); SET_NSCR0_GFRE(base, 1); SET_NSCR0_CLIENTPD(base, 0); } /* * May only be called for non-secure iommus */ static void __program_iommu(struct msm_iommu_drvdata *drvdata) { __reset_iommu(drvdata); if (!msm_iommu_get_scm_call_avail()) __reset_iommu_secure(drvdata); if (drvdata->model == MMU_500) { SET_ACR_SMTNMC_BPTLBEN(drvdata->base, 1); SET_ACR_MMUDIS_BPTLBEN(drvdata->base, 1); SET_ACR_S2CR_BPTLBEN(drvdata->base, 1); } SET_CR0_SMCFCFG(drvdata->base, 1); SET_CR0_USFCFG(drvdata->base, 1); SET_CR0_STALLD(drvdata->base, 1); SET_CR0_GCFGFIE(drvdata->base, 1); SET_CR0_GCFGFRE(drvdata->base, 1); SET_CR0_GFIE(drvdata->base, 1); SET_CR0_GFRE(drvdata->base, 1); SET_CR0_CLIENTPD(drvdata->base, 0); if (!msm_iommu_get_scm_call_avail()) __program_iommu_secure(drvdata); if (drvdata->smmu_local_base) writel_relaxed(0xFFFFFFFF, drvdata->smmu_local_base + SMMU_INTR_SEL_NS); mb(); /* Make sure writes complete before returning */ } void program_iommu_bfb_settings(void __iomem *base, const struct msm_iommu_bfb_settings *bfb_settings) { unsigned int i; if (bfb_settings) for (i = 0; i < bfb_settings->length; i++) SET_GLOBAL_REG(base, bfb_settings->regs[i], bfb_settings->data[i]); mb(); /* Make sure writes complete before returning */ } static void __reset_context(struct msm_iommu_drvdata *iommu_drvdata, int ctx) { void __iomem *base = iommu_drvdata->cb_base; /* Don't set ACTLR to zero because if context bank is in * bypass mode (say after iommu_detach), still this ACTLR * value matters for micro-TLB caching. 
*/ if (iommu_drvdata->model != MMU_500) SET_ACTLR(base, ctx, 0); SET_FAR(base, ctx, 0); SET_FSRRESTORE(base, ctx, 0); SET_NMRR(base, ctx, 0); SET_PAR(base, ctx, 0); SET_PRRR(base, ctx, 0); SET_SCTLR(base, ctx, 0); SET_TTBCR(base, ctx, 0); SET_TTBR0(base, ctx, 0); SET_TTBR1(base, ctx, 0); mb(); } static void __release_smg(void __iomem *base) { int i, smt_size; smt_size = GET_IDR0_NUMSMRG(base); /* Invalidate all SMGs */ for (i = 0; i < smt_size; i++) if (GET_SMR_VALID(base, i)) SET_SMR_VALID(base, i, 0); } #ifdef CONFIG_IOMMU_LPAE static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num, unsigned int asid) { SET_CB_TTBR0_ASID(base, ctx_num, asid); } #else static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num, unsigned int asid) { SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid); } #endif static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata, struct msm_iommu_ctx_drvdata *curr_ctx, struct msm_iommu_priv *priv) { void __iomem *cb_base = iommu_drvdata->cb_base; curr_ctx->asid = curr_ctx->num; msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid); } #ifdef CONFIG_IOMMU_LPAE static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx) { SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */ } static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx) { SET_CB_MAIR0(base, ctx, msm_iommu_get_mair0()); SET_CB_MAIR1(base, ctx, msm_iommu_get_mair1()); } static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx) { /* * Configure page tables as inner-cacheable and shareable to reduce * the TLB miss penalty. */ SET_CB_TTBCR_SH0(base, ctx, 3); /* Inner shareable */ SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cachable*/ SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cachable*/ SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */ SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */ SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cachable*/ SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cachable*/ SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */ } #else static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx) { /* Turn on TEX Remap */ SET_CB_SCTLR_TRE(base, ctx, 1); } static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx) { SET_PRRR(base, ctx, msm_iommu_get_prrr()); SET_NMRR(base, ctx, msm_iommu_get_nmrr()); } static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx) { /* Configure page tables as inner-cacheable and shareable to reduce * the TLB miss penalty. 
*/ SET_CB_TTBR0_S(base, ctx, 1); SET_CB_TTBR0_NOS(base, ctx, 1); SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */ SET_CB_TTBR0_IRGN0(base, ctx, 1); SET_CB_TTBR0_RGN(base, ctx, 1); /* WB, WA */ } #endif static int program_m2v_table(struct device *dev, void __iomem *base) { struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev); u32 *sids = ctx_drvdata->sids; u32 *sid_mask = ctx_drvdata->sid_mask; unsigned int ctx = ctx_drvdata->num; int num = 0, i, smt_size; int len = ctx_drvdata->nsid; smt_size = GET_IDR0_NUMSMRG(base); /* Program the M2V tables for this context */ for (i = 0; i < len / sizeof(*sids); i++) { for (; num < smt_size; num++) if (GET_SMR_VALID(base, num) == 0) break; BUG_ON(num >= smt_size); SET_SMR_VALID(base, num, 1); SET_SMR_MASK(base, num, sid_mask[i]); SET_SMR_ID(base, num, sids[i]); SET_S2CR_N(base, num, 0); SET_S2CR_CBNDX(base, num, ctx); SET_S2CR_MEMATTR(base, num, 0x0A); /* Set security bit override to be Non-secure */ SET_S2CR_NSCFG(base, num, 3); } return 0; } static void program_all_m2v_tables(struct msm_iommu_drvdata *iommu_drvdata) { device_for_each_child(iommu_drvdata->dev, iommu_drvdata->base, program_m2v_table); } static void __program_context(struct msm_iommu_drvdata *iommu_drvdata, struct msm_iommu_ctx_drvdata *ctx_drvdata, struct msm_iommu_priv *priv, bool is_secure, bool program_m2v) { phys_addr_t pn; void __iomem *base = iommu_drvdata->base; void __iomem *cb_base = iommu_drvdata->cb_base; unsigned int ctx = ctx_drvdata->num; phys_addr_t pgtable = __pa(priv->pt.fl_table); __reset_context(iommu_drvdata, ctx); msm_iommu_setup_ctx(cb_base, ctx); if (priv->pt.redirect) msm_iommu_setup_pg_l2_redirect(cb_base, ctx); msm_iommu_setup_memory_remap(cb_base, ctx); pn = pgtable >> CB_TTBR0_ADDR_SHIFT; SET_CB_TTBR0_ADDR(cb_base, ctx, pn); /* Enable context fault interrupt */ SET_CB_SCTLR_CFIE(cb_base, ctx, 1); if (iommu_drvdata->model != MMU_500) { /* Redirect all cacheable requests to L2 slave port. */ SET_CB_ACTLR_BPRCISH(cb_base, ctx, 1); SET_CB_ACTLR_BPRCOSH(cb_base, ctx, 1); SET_CB_ACTLR_BPRCNSH(cb_base, ctx, 1); } /* Enable private ASID namespace */ SET_CB_SCTLR_ASIDPNE(cb_base, ctx, 1); if (!is_secure) { if (program_m2v) program_all_m2v_tables(iommu_drvdata); SET_CBAR_N(base, ctx, 0); /* Stage 1 Context with Stage 2 bypass */ SET_CBAR_TYPE(base, ctx, 1); /* Route page faults to the non-secure interrupt */ SET_CBAR_IRPTNDX(base, ctx, 1); /* Set VMID to non-secure HLOS */ SET_CBAR_VMID(base, ctx, 3); /* Bypass is treated as inner-shareable */ SET_CBAR_BPSHCFG(base, ctx, 2); /* Do not downgrade memory attributes */ SET_CBAR_MEMATTR(base, ctx, 0x0A); } msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv); /* Ensure that ASID assignment has completed before we use * ASID for TLB invalidation. Here, mb() is required because * both these registers are separated by more than 1KB. 
*/ mb(); SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num, ctx_drvdata->asid); __sync_tlb(iommu_drvdata, ctx_drvdata->num); /* Enable the MMU */ SET_CB_SCTLR_M(cb_base, ctx, 1); mb(); } static int msm_iommu_domain_init(struct iommu_domain *domain, int flags) { struct msm_iommu_priv *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) goto fail_nomem; #ifdef CONFIG_IOMMU_PGTABLES_L2 priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE; #endif INIT_LIST_HEAD(&priv->list_attached); if (msm_iommu_pagetable_alloc(&priv->pt)) goto fail_nomem; domain->priv = priv; return 0; fail_nomem: kfree(priv); return -ENOMEM; } static void msm_iommu_domain_destroy(struct iommu_domain *domain) { struct msm_iommu_priv *priv; mutex_lock(&msm_iommu_lock); priv = domain->priv; domain->priv = NULL; if (priv) msm_iommu_pagetable_free(&priv->pt); kfree(priv); mutex_unlock(&msm_iommu_lock); } static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) { struct msm_iommu_priv *priv; struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; struct msm_iommu_ctx_drvdata *tmp_drvdata; int ret = 0; int is_secure; bool set_m2v = false; mutex_lock(&msm_iommu_lock); priv = domain->priv; if (!priv || !dev) { ret = -EINVAL; goto unlock; } iommu_drvdata = dev_get_drvdata(dev->parent); ctx_drvdata = dev_get_drvdata(dev); if (!iommu_drvdata || !ctx_drvdata) { ret = -EINVAL; goto unlock; } ++ctx_drvdata->attach_count; if (ctx_drvdata->attach_count > 1) goto already_attached; if (!list_empty(&ctx_drvdata->attached_elm)) { ret = -EBUSY; goto unlock; } list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm) if (tmp_drvdata == ctx_drvdata) { ret = -EBUSY; goto unlock; } is_secure = iommu_drvdata->sec_id != -1; ret = __enable_regulators(iommu_drvdata); if (ret) goto unlock; ret = apply_bus_vote(iommu_drvdata, 1); if (ret) goto unlock; ret = __enable_clocks(iommu_drvdata); if (ret) { __disable_regulators(iommu_drvdata); goto unlock; } /* We can only do this once */ if (!iommu_drvdata->ctx_attach_count) { if (!is_secure) { iommu_halt(iommu_drvdata); __program_iommu(iommu_drvdata); iommu_resume(iommu_drvdata); } else { ret = msm_iommu_sec_program_iommu( iommu_drvdata, ctx_drvdata); if (ret) { __disable_regulators(iommu_drvdata); __disable_clocks(iommu_drvdata); goto unlock; } } program_iommu_bfb_settings(iommu_drvdata->base, iommu_drvdata->bfb_settings); set_m2v = true; } iommu_halt(iommu_drvdata); __program_context(iommu_drvdata, ctx_drvdata, priv, is_secure, set_m2v); iommu_resume(iommu_drvdata); __disable_clocks(iommu_drvdata); list_add(&(ctx_drvdata->attached_elm), &priv->list_attached); ctx_drvdata->attached_domain = domain; ++iommu_drvdata->ctx_attach_count; already_attached: mutex_unlock(&msm_iommu_lock); msm_iommu_attached(dev->parent); return ret; unlock: mutex_unlock(&msm_iommu_lock); return ret; } static void msm_iommu_detach_dev(struct iommu_domain *domain, struct device *dev) { struct msm_iommu_priv *priv; struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; int ret; int is_secure; if (!dev) return; msm_iommu_detached(dev->parent); mutex_lock(&msm_iommu_lock); priv = domain->priv; if (!priv) goto unlock; iommu_drvdata = dev_get_drvdata(dev->parent); ctx_drvdata = dev_get_drvdata(dev); if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain) goto unlock; --ctx_drvdata->attach_count; BUG_ON(ctx_drvdata->attach_count < 0); if (ctx_drvdata->attach_count > 0) goto unlock; ret = __enable_clocks(iommu_drvdata); if 
(ret) goto unlock; is_secure = iommu_drvdata->sec_id != -1; SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num, ctx_drvdata->asid); __sync_tlb(iommu_drvdata, ctx_drvdata->num); ctx_drvdata->asid = -1; __reset_context(iommu_drvdata, ctx_drvdata->num); /* * Only reset the M2V tables on the very last detach */ if (!is_secure && iommu_drvdata->ctx_attach_count == 1) { iommu_halt(iommu_drvdata); __release_smg(iommu_drvdata->base); iommu_resume(iommu_drvdata); } __disable_clocks(iommu_drvdata); apply_bus_vote(iommu_drvdata, 0); __disable_regulators(iommu_drvdata); list_del_init(&ctx_drvdata->attached_elm); ctx_drvdata->attached_domain = NULL; BUG_ON(iommu_drvdata->ctx_attach_count == 0); --iommu_drvdata->ctx_attach_count; unlock: mutex_unlock(&msm_iommu_lock); } static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, phys_addr_t pa, size_t len, int prot) { struct msm_iommu_priv *priv; int ret = 0; mutex_lock(&msm_iommu_lock); priv = domain->priv; if (!priv) { ret = -EINVAL; goto fail; } ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot); if (ret) goto fail; #ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP ret = __flush_iotlb_va(domain, va); #endif fail: mutex_unlock(&msm_iommu_lock); return ret; } static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, size_t len) { struct msm_iommu_priv *priv; int ret = -ENODEV; mutex_lock(&msm_iommu_lock); priv = domain->priv; if (!priv) goto fail; ret = msm_iommu_pagetable_unmap(&priv->pt, va, len); if (ret < 0) goto fail; ret = __flush_iotlb_va(domain, va); msm_iommu_pagetable_free_tables(&priv->pt, va, len); fail: mutex_unlock(&msm_iommu_lock); /* the IOMMU API requires us to return how many bytes were unmapped */ len = ret ? 0 : len; return len; } static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va, struct scatterlist *sg, unsigned int len, int prot) { int ret; struct msm_iommu_priv *priv; mutex_lock(&msm_iommu_lock); priv = domain->priv; if (!priv) { ret = -EINVAL; goto fail; } ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot); if (ret) goto fail; #ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP __flush_iotlb(domain); #endif fail: mutex_unlock(&msm_iommu_lock); return ret; } static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va, unsigned int len) { struct msm_iommu_priv *priv; mutex_lock(&msm_iommu_lock); priv = domain->priv; msm_iommu_pagetable_unmap_range(&priv->pt, va, len); __flush_iotlb(domain); msm_iommu_pagetable_free_tables(&priv->pt, va, len); mutex_unlock(&msm_iommu_lock); return 0; } #ifdef CONFIG_IOMMU_LPAE static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par) { phys_addr_t phy; /* Upper 28 bits from PAR, lower 12 from VA */ phy = (par & 0xFFFFFFF000ULL) | (va & 0x00000FFF); return phy; } #else static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par) { phys_addr_t phy; /* We are dealing with a supersection */ if (par & CB_PAR_SS) phy = (par & 0xFF000000) | (va & 0x00FFFFFF); else /* Upper 20 bits from PAR, lower 12 from VA */ phy = (par & 0xFFFFF000) | (va & 0x00000FFF); return phy; } #endif static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, phys_addr_t va) { struct msm_iommu_priv *priv; struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; u64 par; void __iomem *base; phys_addr_t ret = 0; int ctx; int i; mutex_lock(&msm_iommu_lock); priv = domain->priv; if (list_empty(&priv->list_attached)) goto fail; ctx_drvdata = list_entry(priv->list_attached.next, struct 
msm_iommu_ctx_drvdata, attached_elm); iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); if (iommu_drvdata->model == MMU_500) { ret = msm_iommu_iova_to_phys_soft(domain, va); mutex_unlock(&msm_iommu_lock); return ret; } base = iommu_drvdata->cb_base; ctx = ctx_drvdata->num; ret = __enable_clocks(iommu_drvdata); if (ret) { ret = 0; /* 0 indicates translation failed */ goto fail; } SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR); mb(); for (i = 0; i < IOMMU_MSEC_TIMEOUT; i += IOMMU_MSEC_STEP) if (GET_CB_ATSR_ACTIVE(base, ctx) == 0) break; else msleep(IOMMU_MSEC_STEP); if (i >= IOMMU_MSEC_TIMEOUT) { pr_err("%s: iova to phys timed out on %pa for %s (%s)\n", __func__, &va, iommu_drvdata->name, ctx_drvdata->name); ret = 0; goto fail; } par = GET_PAR(base, ctx); __disable_clocks(iommu_drvdata); if (par & CB_PAR_F) { unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT; pr_err("IOMMU translation fault!\n"); pr_err("name = %s\n", iommu_drvdata->name); pr_err("context = %s (%d)\n", ctx_drvdata->name, ctx_drvdata->num); pr_err("Interesting registers:\n"); pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par, (par & CB_PAR_F) ? "F " : "", (par & CB_PAR_TF) ? "TF " : "", (par & CB_PAR_AFF) ? "AFF " : "", (par & CB_PAR_PF) ? "PF " : "", (par & CB_PAR_EF) ? "EF " : "", (par & CB_PAR_TLBMCF) ? "TLBMCF " : "", (par & CB_PAR_TLBLKF) ? "TLBLKF " : "", (par & CB_PAR_ATOT) ? "ATOT " : "", level, (par & CB_PAR_STAGE) ? "S2 " : "S1 "); ret = 0; } else { ret = msm_iommu_get_phy_from_PAR(va, par); } fail: mutex_unlock(&msm_iommu_lock); return ret; } static int msm_iommu_domain_has_cap(struct iommu_domain *domain, unsigned long cap) { return 0; } #ifdef CONFIG_IOMMU_LPAE static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[]) { pr_err("MAIR0 = %08x MAIR1 = %08x\n", regs[DUMP_REG_MAIR0].val, regs[DUMP_REG_MAIR1].val); } #else static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[]) { pr_err("PRRR = %08x NMRR = %08x\n", regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val); } #endif void print_ctx_regs(struct msm_iommu_context_reg regs[]) { uint32_t fsr = regs[DUMP_REG_FSR].val; u64 ttbr; enum dump_reg iter; pr_err("FAR = %016llx\n", COMBINE_DUMP_REG( regs[DUMP_REG_FAR1].val, regs[DUMP_REG_FAR0].val)); pr_err("PAR = %016llx\n", COMBINE_DUMP_REG( regs[DUMP_REG_PAR1].val, regs[DUMP_REG_PAR0].val)); pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr, (fsr & 0x02) ? "TF " : "", (fsr & 0x04) ? "AFF " : "", (fsr & 0x08) ? "PF " : "", (fsr & 0x10) ? "EF " : "", (fsr & 0x20) ? "TLBMCF " : "", (fsr & 0x40) ? "TLBLKF " : "", (fsr & 0x80) ? "MHF " : "", (fsr & 0x40000000) ? "SS " : "", (fsr & 0x80000000) ? 
"MULTI " : ""); pr_err("FSYNR0 = %08x FSYNR1 = %08x\n", regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val); ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val, regs[DUMP_REG_TTBR0_0].val); if (regs[DUMP_REG_TTBR0_1].valid) pr_err("TTBR0 = %016llx\n", ttbr); else pr_err("TTBR0 = %016llx (32b)\n", ttbr); ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val, regs[DUMP_REG_TTBR1_0].val); if (regs[DUMP_REG_TTBR1_1].valid) pr_err("TTBR1 = %016llx\n", ttbr); else pr_err("TTBR1 = %016llx (32b)\n", ttbr); pr_err("SCTLR = %08x ACTLR = %08x\n", regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val); pr_err("CBAR = %08x CBFRSYNRA = %08x\n", regs[DUMP_REG_CBAR_N].val, regs[DUMP_REG_CBFRSYNRA_N].val); print_ctx_mem_attr_regs(regs); for (iter = DUMP_REG_FIRST; iter < MAX_DUMP_REGS; ++iter) if (!regs[iter].valid) pr_err("NOTE: Value actually unknown for %s\n", dump_regs_tbl[iter].name); } static void __print_ctx_regs(struct msm_iommu_drvdata *drvdata, int ctx, unsigned int fsr) { void __iomem *base = drvdata->base; void __iomem *cb_base = drvdata->cb_base; bool is_secure = drvdata->sec_id != -1; struct msm_iommu_context_reg regs[MAX_DUMP_REGS]; unsigned int i; memset(regs, 0, sizeof(regs)); for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) { struct msm_iommu_context_reg *r = &regs[i]; unsigned long regaddr = dump_regs_tbl[i].reg_offset; if (is_secure && dump_regs_tbl[i].dump_reg_type != DRT_CTX_REG) { r->valid = 0; continue; } r->valid = 1; switch (dump_regs_tbl[i].dump_reg_type) { case DRT_CTX_REG: r->val = GET_CTX_REG(regaddr, cb_base, ctx); break; case DRT_GLOBAL_REG: r->val = GET_GLOBAL_REG(regaddr, base); break; case DRT_GLOBAL_REG_N: r->val = GET_GLOBAL_REG_N(regaddr, ctx, base); break; default: pr_info("Unknown dump_reg_type...\n"); r->valid = 0; break; } } print_ctx_regs(regs); } static void print_global_regs(void __iomem *base, unsigned int gfsr) { pr_err("GFAR = %016llx\n", GET_GFAR(base)); pr_err("GFSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", gfsr, (gfsr & 0x01) ? "ICF " : "", (gfsr & 0x02) ? "USF " : "", (gfsr & 0x04) ? "SMCF " : "", (gfsr & 0x08) ? "UCBF " : "", (gfsr & 0x10) ? "UCIF " : "", (gfsr & 0x20) ? "CAF " : "", (gfsr & 0x40) ? "EF " : "", (gfsr & 0x80) ? "PF " : "", (gfsr & 0x40000000) ? "SS " : "", (gfsr & 0x80000000) ? "MULTI " : ""); pr_err("GFSYNR0 = %08x\n", GET_GFSYNR0(base)); pr_err("GFSYNR1 = %08x\n", GET_GFSYNR1(base)); pr_err("GFSYNR2 = %08x\n", GET_GFSYNR2(base)); } irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct msm_iommu_drvdata *drvdata; unsigned int gfsr; int ret; mutex_lock(&msm_iommu_lock); BUG_ON(!pdev); drvdata = dev_get_drvdata(&pdev->dev); BUG_ON(!drvdata); if (!drvdata->powered_on) { pr_err("Unexpected IOMMU global fault !!\n"); pr_err("name = %s\n", drvdata->name); pr_err("Power is OFF. 
Can't read global fault information\n"); ret = IRQ_HANDLED; goto fail; } if (drvdata->sec_id != -1) { pr_err("NON-secure interrupt from secure %s\n", drvdata->name); ret = IRQ_HANDLED; goto fail; } ret = __enable_clocks(drvdata); if (ret) { ret = IRQ_NONE; goto fail; } gfsr = GET_GFSR(drvdata->base); if (gfsr) { pr_err("Unexpected %s global fault !!\n", drvdata->name); print_global_regs(drvdata->base, gfsr); SET_GFSR(drvdata->base, gfsr); ret = IRQ_HANDLED; } else ret = IRQ_NONE; __disable_clocks(drvdata); fail: mutex_unlock(&msm_iommu_lock); return ret; } irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id) { struct platform_device *pdev = dev_id; struct msm_iommu_drvdata *drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; unsigned int fsr; int ret; phys_addr_t pagetable_phys; u64 faulty_iova = 0; mutex_lock(&msm_iommu_lock); BUG_ON(!pdev); drvdata = dev_get_drvdata(pdev->dev.parent); BUG_ON(!drvdata); ctx_drvdata = dev_get_drvdata(&pdev->dev); BUG_ON(!ctx_drvdata); if (!drvdata->powered_on) { pr_err("Unexpected IOMMU page fault!\n"); pr_err("name = %s\n", drvdata->name); pr_err("Power is OFF. Unable to read page fault information\n"); /* * We cannot determine which context bank caused the issue so * we just return handled here to ensure IRQ handler code is * happy */ ret = IRQ_HANDLED; goto fail; } ret = __enable_clocks(drvdata); if (ret) { ret = IRQ_NONE; goto fail; } fsr = GET_FSR(drvdata->cb_base, ctx_drvdata->num); if (fsr) { if (!ctx_drvdata->attached_domain) { pr_err("Bad domain in interrupt handler\n"); ret = -ENOSYS; } else { faulty_iova = GET_FAR(drvdata->cb_base, ctx_drvdata->num); ret = report_iommu_fault(ctx_drvdata->attached_domain, &ctx_drvdata->pdev->dev, faulty_iova, 0); } if (ret == -ENOSYS) { pr_err("Unexpected IOMMU page fault!\n"); pr_err("name = %s\n", drvdata->name); pr_err("context = %s (%d)\n", ctx_drvdata->name, ctx_drvdata->num); pr_err("Interesting registers:\n"); __print_ctx_regs(drvdata, ctx_drvdata->num, fsr); if (ctx_drvdata->attached_domain) { pagetable_phys = msm_iommu_iova_to_phys_soft( ctx_drvdata->attached_domain, faulty_iova); pr_err("Page table in DDR shows PA = %x\n", (unsigned int) pagetable_phys); } } if (ret != -EBUSY) SET_FSR(drvdata->cb_base, ctx_drvdata->num, fsr); ret = IRQ_HANDLED; } else ret = IRQ_NONE; __disable_clocks(drvdata); fail: mutex_unlock(&msm_iommu_lock); return ret; } static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain) { struct msm_iommu_priv *priv = domain->priv; return __pa(priv->pt.fl_table); } #define DUMP_REG_INIT(dump_reg, cb_reg, mbp, drt) \ do { \ dump_regs_tbl[dump_reg].reg_offset = cb_reg; \ dump_regs_tbl[dump_reg].name = #cb_reg; \ dump_regs_tbl[dump_reg].must_be_present = mbp; \ dump_regs_tbl[dump_reg].dump_reg_type = drt; \ } while (0) static void msm_iommu_build_dump_regs_table(void) { DUMP_REG_INIT(DUMP_REG_FAR0, CB_FAR, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_FAR1, CB_FAR + 4, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_PAR0, CB_PAR, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_PAR1, CB_PAR + 4, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_FSR, CB_FSR, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_FSYNR0, CB_FSYNR0, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_FSYNR1, CB_FSYNR1, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_TTBR0_0, CB_TTBR0, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_TTBR0_1, CB_TTBR0 + 4, 0, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_TTBR1_0, CB_TTBR1, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_TTBR1_1, CB_TTBR1 + 4, 0, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_SCTLR, CB_SCTLR, 1, DRT_CTX_REG); 
DUMP_REG_INIT(DUMP_REG_ACTLR, CB_ACTLR, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_PRRR, CB_PRRR, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_NMRR, CB_NMRR, 1, DRT_CTX_REG); DUMP_REG_INIT(DUMP_REG_CBAR_N, CBAR, 1, DRT_GLOBAL_REG_N); DUMP_REG_INIT(DUMP_REG_CBFRSYNRA_N, CBFRSYNRA, 1, DRT_GLOBAL_REG_N); } static struct iommu_ops msm_iommu_ops = { .domain_init = msm_iommu_domain_init, .domain_destroy = msm_iommu_domain_destroy, .attach_dev = msm_iommu_attach_dev, .detach_dev = msm_iommu_detach_dev, .map = msm_iommu_map, .unmap = msm_iommu_unmap, .map_range = msm_iommu_map_range, .unmap_range = msm_iommu_unmap_range, .iova_to_phys = msm_iommu_iova_to_phys, .domain_has_cap = msm_iommu_domain_has_cap, .get_pt_base_addr = msm_iommu_get_pt_base_addr, .pgsize_bitmap = MSM_IOMMU_PGSIZES, }; static int __init msm_iommu_init(void) { msm_iommu_pagetable_init(); bus_set_iommu(&platform_bus_type, &msm_iommu_ops); msm_iommu_build_dump_regs_table(); return 0; } subsys_initcall(msm_iommu_init); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MSM SMMU v2 Driver");
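/*
 * Editorial sketch -- not part of the original driver above.  It illustrates
 * how a hypothetical consumer would bracket SMMU register access with the
 * iommu_access_ops_v1 callbacks exported by this file: power (GDSC) first,
 * then the bus vote, then clocks, then the global lock.  The function name is
 * made up, and the sketch assumes the iommu_access_ops member prototypes match
 * the handlers assigned above (__enable_regulators, apply_bus_vote,
 * __enable_clocks, _iommu_lock_acquire, and their teardown counterparts).
 */
static int example_smmu_reg_access(struct msm_iommu_drvdata *drvdata)
{
	struct iommu_access_ops *ops = &iommu_access_ops_v1;
	int ret;

	ret = ops->iommu_power_on(drvdata);	/* __enable_regulators() */
	if (ret)
		return ret;

	ret = ops->iommu_bus_vote(drvdata, 1);	/* apply_bus_vote() */
	if (ret)
		goto out_power;

	ret = ops->iommu_clk_on(drvdata);	/* __enable_clocks() */
	if (ret)
		goto out_bus;

	ops->iommu_lock_acquire(0);		/* takes msm_iommu_lock */
	/* ... read/write SMMU global or context registers here ... */
	ops->iommu_lock_release(0);

	ops->iommu_clk_off(drvdata);		/* __disable_clocks() */
out_bus:
	ops->iommu_bus_vote(drvdata, 0);	/* drop the bus vote */
out_power:
	ops->iommu_power_off(drvdata);		/* __disable_regulators() */
	return ret;
}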
gpl-2.0
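Editorial note on the driver above: because msm_iommu_init() registers msm_iommu_ops on the platform bus via bus_set_iommu(), client drivers never call msm_iommu_domain_init(), msm_iommu_attach_dev(), msm_iommu_map() and friends directly -- they reach them through the generic Linux IOMMU API. The following is a minimal sketch of that flow for the same kernel generation as this driver (iommu_domain_alloc() still takes a bus type); it is not part of either source file, and the client device, IOVA and physical address are illustrative assumptions.

#include <linux/kernel.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/* "dev" is a hypothetical client platform device sitting behind one of the
 * SMMU context banks described in the device tree. */
static int example_map_one_page(struct device *dev)
{
	struct iommu_domain *domain;
	phys_addr_t pa;
	int ret;

	/* Routed to msm_iommu_domain_init() through bus_set_iommu() */
	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* msm_iommu_attach_dev() */
	if (ret)
		goto free_domain;

	/* msm_iommu_map(): one 4K page, example IOVA/PA, read + write */
	ret = iommu_map(domain, 0x10000000, 0x80000000, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	pa = iommu_iova_to_phys(domain, 0x10000000); /* msm_iommu_iova_to_phys() */
	pr_info("iova 0x10000000 maps to pa %pa\n", &pa);

	iommu_unmap(domain, 0x10000000, SZ_4K);	/* msm_iommu_unmap() */
detach:
	iommu_detach_device(domain, dev);	/* msm_iommu_detach_dev() */
free_domain:
	iommu_domain_free(domain);
	return ret;
}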
javelinanddart/android_kernel_samsung_tasstmo
sound/soc/codecs/wm8753.c
507
55116
/* * wm8753.c -- WM8753 ALSA Soc Audio driver * * Copyright 2003 Wolfson Microelectronics PLC. * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * Notes: * The WM8753 is a low power, high quality stereo codec with integrated PCM * codec designed for portable digital telephony applications. * * Dual DAI:- * * This driver support 2 DAI PCM's. This makes the default PCM available for * HiFi audio (e.g. MP3, ogg) playback/capture and the other PCM available for * voice. * * Please note that the voice PCM can be connected directly to a Bluetooth * codec or GSM modem and thus cannot be read or written to, although it is * available to be configured with snd_hw_params(), etc and kcontrols in the * normal alsa manner. * * Fast DAI switching:- * * The driver can now fast switch between the DAI configurations via a * an alsa kcontrol. This allows the PCM to remain open. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <sound/initval.h> #include <sound/tlv.h> #include <asm/div64.h> #include "wm8753.h" static int caps_charge = 2000; module_param(caps_charge, int, 0); MODULE_PARM_DESC(caps_charge, "WM8753 cap charge time (msecs)"); static void wm8753_set_dai_mode(struct snd_soc_codec *codec, unsigned int mode); /* * wm8753 register cache * We can't read the WM8753 register space when we * are using 2 wire for device control, so we cache them instead. 
*/ static const u16 wm8753_reg[] = { 0x0008, 0x0000, 0x000a, 0x000a, 0x0033, 0x0000, 0x0007, 0x00ff, 0x00ff, 0x000f, 0x000f, 0x007b, 0x0000, 0x0032, 0x0000, 0x00c3, 0x00c3, 0x00c0, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0055, 0x0005, 0x0050, 0x0055, 0x0050, 0x0055, 0x0050, 0x0055, 0x0079, 0x0079, 0x0079, 0x0079, 0x0079, 0x0000, 0x0000, 0x0000, 0x0000, 0x0097, 0x0097, 0x0000, 0x0004, 0x0000, 0x0083, 0x0024, 0x01ba, 0x0000, 0x0083, 0x0024, 0x01ba, 0x0000, 0x0000, 0x0000 }; /* codec private data */ struct wm8753_priv { unsigned int sysclk; unsigned int pcmclk; struct snd_soc_codec codec; u16 reg_cache[ARRAY_SIZE(wm8753_reg)]; }; /* * read wm8753 register cache */ static inline unsigned int wm8753_read_reg_cache(struct snd_soc_codec *codec, unsigned int reg) { u16 *cache = codec->reg_cache; if (reg < 1 || reg >= (ARRAY_SIZE(wm8753_reg) + 1)) return -1; return cache[reg - 1]; } /* * write wm8753 register cache */ static inline void wm8753_write_reg_cache(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { u16 *cache = codec->reg_cache; if (reg < 1 || reg >= (ARRAY_SIZE(wm8753_reg) + 1)) return; cache[reg - 1] = value; } /* * write to the WM8753 register space */ static int wm8753_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { u8 data[2]; /* data is * D15..D9 WM8753 register offset * D8...D0 register data */ data[0] = (reg << 1) | ((value >> 8) & 0x0001); data[1] = value & 0x00ff; wm8753_write_reg_cache(codec, reg, value); if (codec->hw_write(codec->control_data, data, 2) == 2) return 0; else return -EIO; } #define wm8753_reset(c) wm8753_write(c, WM8753_RESET, 0) /* * WM8753 Controls */ static const char *wm8753_base[] = {"Linear Control", "Adaptive Boost"}; static const char *wm8753_base_filter[] = {"130Hz @ 48kHz", "200Hz @ 48kHz", "100Hz @ 16kHz", "400Hz @ 48kHz", "100Hz @ 8kHz", "200Hz @ 8kHz"}; static const char *wm8753_treble[] = {"8kHz", "4kHz"}; static const char *wm8753_alc_func[] = {"Off", "Right", "Left", "Stereo"}; static const char *wm8753_ng_type[] = {"Constant PGA Gain", "Mute ADC Output"}; static const char *wm8753_3d_func[] = {"Capture", "Playback"}; static const char *wm8753_3d_uc[] = {"2.2kHz", "1.5kHz"}; static const char *wm8753_3d_lc[] = {"200Hz", "500Hz"}; static const char *wm8753_deemp[] = {"None", "32kHz", "44.1kHz", "48kHz"}; static const char *wm8753_mono_mix[] = {"Stereo", "Left", "Right", "Mono"}; static const char *wm8753_dac_phase[] = {"Non Inverted", "Inverted"}; static const char *wm8753_line_mix[] = {"Line 1 + 2", "Line 1 - 2", "Line 1", "Line 2"}; static const char *wm8753_mono_mux[] = {"Line Mix", "Rx Mix"}; static const char *wm8753_right_mux[] = {"Line 2", "Rx Mix"}; static const char *wm8753_left_mux[] = {"Line 1", "Rx Mix"}; static const char *wm8753_rxmsel[] = {"RXP - RXN", "RXP + RXN", "RXP", "RXN"}; static const char *wm8753_sidetone_mux[] = {"Left PGA", "Mic 1", "Mic 2", "Right PGA"}; static const char *wm8753_mono2_src[] = {"Inverted Mono 1", "Left", "Right", "Left + Right"}; static const char *wm8753_out3[] = {"VREF", "ROUT2", "Left + Right"}; static const char *wm8753_out4[] = {"VREF", "Capture ST", "LOUT2"}; static const char *wm8753_radcsel[] = {"PGA", "Line or RXP-RXN", "Sidetone"}; static const char *wm8753_ladcsel[] = {"PGA", "Line or RXP-RXN", "Line"}; static const char *wm8753_mono_adc[] = {"Stereo", "Analogue Mix Left", "Analogue Mix Right", "Digital Mono Mix"}; static const char *wm8753_adc_hp[] = {"3.4Hz @ 48kHz", "82Hz @ 16k", 
"82Hz @ 8kHz", "170Hz @ 8kHz"}; static const char *wm8753_adc_filter[] = {"HiFi", "Voice"}; static const char *wm8753_mic_sel[] = {"Mic 1", "Mic 2", "Mic 3"}; static const char *wm8753_dai_mode[] = {"DAI 0", "DAI 1", "DAI 2", "DAI 3"}; static const char *wm8753_dat_sel[] = {"Stereo", "Left ADC", "Right ADC", "Channel Swap"}; static const char *wm8753_rout2_phase[] = {"Non Inverted", "Inverted"}; static const struct soc_enum wm8753_enum[] = { SOC_ENUM_SINGLE(WM8753_BASS, 7, 2, wm8753_base), SOC_ENUM_SINGLE(WM8753_BASS, 4, 6, wm8753_base_filter), SOC_ENUM_SINGLE(WM8753_TREBLE, 6, 2, wm8753_treble), SOC_ENUM_SINGLE(WM8753_ALC1, 7, 4, wm8753_alc_func), SOC_ENUM_SINGLE(WM8753_NGATE, 1, 2, wm8753_ng_type), SOC_ENUM_SINGLE(WM8753_3D, 7, 2, wm8753_3d_func), SOC_ENUM_SINGLE(WM8753_3D, 6, 2, wm8753_3d_uc), SOC_ENUM_SINGLE(WM8753_3D, 5, 2, wm8753_3d_lc), SOC_ENUM_SINGLE(WM8753_DAC, 1, 4, wm8753_deemp), SOC_ENUM_SINGLE(WM8753_DAC, 4, 4, wm8753_mono_mix), SOC_ENUM_SINGLE(WM8753_DAC, 6, 2, wm8753_dac_phase), SOC_ENUM_SINGLE(WM8753_INCTL1, 3, 4, wm8753_line_mix), SOC_ENUM_SINGLE(WM8753_INCTL1, 2, 2, wm8753_mono_mux), SOC_ENUM_SINGLE(WM8753_INCTL1, 1, 2, wm8753_right_mux), SOC_ENUM_SINGLE(WM8753_INCTL1, 0, 2, wm8753_left_mux), SOC_ENUM_SINGLE(WM8753_INCTL2, 6, 4, wm8753_rxmsel), SOC_ENUM_SINGLE(WM8753_INCTL2, 4, 4, wm8753_sidetone_mux), SOC_ENUM_SINGLE(WM8753_OUTCTL, 7, 4, wm8753_mono2_src), SOC_ENUM_SINGLE(WM8753_OUTCTL, 0, 3, wm8753_out3), SOC_ENUM_SINGLE(WM8753_ADCTL2, 7, 3, wm8753_out4), SOC_ENUM_SINGLE(WM8753_ADCIN, 2, 3, wm8753_radcsel), SOC_ENUM_SINGLE(WM8753_ADCIN, 0, 3, wm8753_ladcsel), SOC_ENUM_SINGLE(WM8753_ADCIN, 4, 4, wm8753_mono_adc), SOC_ENUM_SINGLE(WM8753_ADC, 2, 4, wm8753_adc_hp), SOC_ENUM_SINGLE(WM8753_ADC, 4, 2, wm8753_adc_filter), SOC_ENUM_SINGLE(WM8753_MICBIAS, 6, 3, wm8753_mic_sel), SOC_ENUM_SINGLE(WM8753_IOCTL, 2, 4, wm8753_dai_mode), SOC_ENUM_SINGLE(WM8753_ADC, 7, 4, wm8753_dat_sel), SOC_ENUM_SINGLE(WM8753_OUTCTL, 2, 2, wm8753_rout2_phase), }; static int wm8753_get_dai(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); int mode = wm8753_read_reg_cache(codec, WM8753_IOCTL); ucontrol->value.integer.value[0] = (mode & 0xc) >> 2; return 0; } static int wm8753_set_dai(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); int mode = wm8753_read_reg_cache(codec, WM8753_IOCTL); if (((mode & 0xc) >> 2) == ucontrol->value.integer.value[0]) return 0; mode &= 0xfff3; mode |= (ucontrol->value.integer.value[0] << 2); wm8753_write(codec, WM8753_IOCTL, mode); wm8753_set_dai_mode(codec, ucontrol->value.integer.value[0]); return 1; } static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 300, 0); static const DECLARE_TLV_DB_SCALE(mic_preamp_tlv, 1200, 600, 0); static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1); static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1); static const unsigned int out_tlv[] = { TLV_DB_RANGE_HEAD(2), /* 0000000 - 0101111 = "Analogue mute" */ 0, 48, TLV_DB_SCALE_ITEM(-25500, 0, 0), 48, 127, TLV_DB_SCALE_ITEM(-7300, 100, 0), }; static const DECLARE_TLV_DB_SCALE(mix_tlv, -1500, 300, 0); static const DECLARE_TLV_DB_SCALE(voice_mix_tlv, -1200, 300, 0); static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0); static const struct snd_kcontrol_new wm8753_snd_controls[] = { SOC_DOUBLE_R_TLV("PCM Volume", WM8753_LDAC, WM8753_RDAC, 0, 255, 0, dac_tlv), SOC_DOUBLE_R_TLV("ADC Capture Volume", WM8753_LADC, WM8753_RADC, 0, 255, 
0, adc_tlv), SOC_DOUBLE_R_TLV("Headphone Playback Volume", WM8753_LOUT1V, WM8753_ROUT1V, 0, 127, 0, out_tlv), SOC_DOUBLE_R_TLV("Speaker Playback Volume", WM8753_LOUT2V, WM8753_ROUT2V, 0, 127, 0, out_tlv), SOC_SINGLE_TLV("Mono Playback Volume", WM8753_MOUTV, 0, 127, 0, out_tlv), SOC_DOUBLE_R_TLV("Bypass Playback Volume", WM8753_LOUTM1, WM8753_ROUTM1, 4, 7, 1, mix_tlv), SOC_DOUBLE_R_TLV("Sidetone Playback Volume", WM8753_LOUTM2, WM8753_ROUTM2, 4, 7, 1, mix_tlv), SOC_DOUBLE_R_TLV("Voice Playback Volume", WM8753_LOUTM2, WM8753_ROUTM2, 0, 7, 1, voice_mix_tlv), SOC_DOUBLE_R("Headphone Playback ZC Switch", WM8753_LOUT1V, WM8753_ROUT1V, 7, 1, 0), SOC_DOUBLE_R("Speaker Playback ZC Switch", WM8753_LOUT2V, WM8753_ROUT2V, 7, 1, 0), SOC_SINGLE_TLV("Mono Bypass Playback Volume", WM8753_MOUTM1, 4, 7, 1, mix_tlv), SOC_SINGLE_TLV("Mono Sidetone Playback Volume", WM8753_MOUTM2, 4, 7, 1, mix_tlv), SOC_SINGLE_TLV("Mono Voice Playback Volume", WM8753_MOUTM2, 0, 7, 1, voice_mix_tlv), SOC_SINGLE("Mono Playback ZC Switch", WM8753_MOUTV, 7, 1, 0), SOC_ENUM("Bass Boost", wm8753_enum[0]), SOC_ENUM("Bass Filter", wm8753_enum[1]), SOC_SINGLE("Bass Volume", WM8753_BASS, 0, 15, 1), SOC_SINGLE("Treble Volume", WM8753_TREBLE, 0, 15, 1), SOC_ENUM("Treble Cut-off", wm8753_enum[2]), SOC_DOUBLE_TLV("Sidetone Capture Volume", WM8753_RECMIX1, 0, 4, 7, 1, rec_mix_tlv), SOC_SINGLE_TLV("Voice Sidetone Capture Volume", WM8753_RECMIX2, 0, 7, 1, rec_mix_tlv), SOC_DOUBLE_R_TLV("Capture Volume", WM8753_LINVOL, WM8753_RINVOL, 0, 63, 0, pga_tlv), SOC_DOUBLE_R("Capture ZC Switch", WM8753_LINVOL, WM8753_RINVOL, 6, 1, 0), SOC_DOUBLE_R("Capture Switch", WM8753_LINVOL, WM8753_RINVOL, 7, 1, 1), SOC_ENUM("Capture Filter Select", wm8753_enum[23]), SOC_ENUM("Capture Filter Cut-off", wm8753_enum[24]), SOC_SINGLE("Capture Filter Switch", WM8753_ADC, 0, 1, 1), SOC_SINGLE("ALC Capture Target Volume", WM8753_ALC1, 0, 7, 0), SOC_SINGLE("ALC Capture Max Volume", WM8753_ALC1, 4, 7, 0), SOC_ENUM("ALC Capture Function", wm8753_enum[3]), SOC_SINGLE("ALC Capture ZC Switch", WM8753_ALC2, 8, 1, 0), SOC_SINGLE("ALC Capture Hold Time", WM8753_ALC2, 0, 15, 1), SOC_SINGLE("ALC Capture Decay Time", WM8753_ALC3, 4, 15, 1), SOC_SINGLE("ALC Capture Attack Time", WM8753_ALC3, 0, 15, 0), SOC_SINGLE("ALC Capture NG Threshold", WM8753_NGATE, 3, 31, 0), SOC_ENUM("ALC Capture NG Type", wm8753_enum[4]), SOC_SINGLE("ALC Capture NG Switch", WM8753_NGATE, 0, 1, 0), SOC_ENUM("3D Function", wm8753_enum[5]), SOC_ENUM("3D Upper Cut-off", wm8753_enum[6]), SOC_ENUM("3D Lower Cut-off", wm8753_enum[7]), SOC_SINGLE("3D Volume", WM8753_3D, 1, 15, 0), SOC_SINGLE("3D Switch", WM8753_3D, 0, 1, 0), SOC_SINGLE("Capture 6dB Attenuate", WM8753_ADCTL1, 2, 1, 0), SOC_SINGLE("Playback 6dB Attenuate", WM8753_ADCTL1, 1, 1, 0), SOC_ENUM("De-emphasis", wm8753_enum[8]), SOC_ENUM("Playback Mono Mix", wm8753_enum[9]), SOC_ENUM("Playback Phase", wm8753_enum[10]), SOC_SINGLE_TLV("Mic2 Capture Volume", WM8753_INCTL1, 7, 3, 0, mic_preamp_tlv), SOC_SINGLE_TLV("Mic1 Capture Volume", WM8753_INCTL1, 5, 3, 0, mic_preamp_tlv), SOC_ENUM_EXT("DAI Mode", wm8753_enum[26], wm8753_get_dai, wm8753_set_dai), SOC_ENUM("ADC Data Select", wm8753_enum[27]), SOC_ENUM("ROUT2 Phase", wm8753_enum[28]), }; /* * _DAPM_ Controls */ /* Left Mixer */ static const struct snd_kcontrol_new wm8753_left_mixer_controls[] = { SOC_DAPM_SINGLE("Voice Playback Switch", WM8753_LOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Sidetone Playback Switch", WM8753_LOUTM2, 7, 1, 0), SOC_DAPM_SINGLE("Left Playback Switch", WM8753_LOUTM1, 8, 1, 0), 
SOC_DAPM_SINGLE("Bypass Playback Switch", WM8753_LOUTM1, 7, 1, 0), }; /* Right mixer */ static const struct snd_kcontrol_new wm8753_right_mixer_controls[] = { SOC_DAPM_SINGLE("Voice Playback Switch", WM8753_ROUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Sidetone Playback Switch", WM8753_ROUTM2, 7, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8753_ROUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Bypass Playback Switch", WM8753_ROUTM1, 7, 1, 0), }; /* Mono mixer */ static const struct snd_kcontrol_new wm8753_mono_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8753_MOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8753_MOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Voice Playback Switch", WM8753_MOUTM2, 3, 1, 0), SOC_DAPM_SINGLE("Sidetone Playback Switch", WM8753_MOUTM2, 7, 1, 0), SOC_DAPM_SINGLE("Bypass Playback Switch", WM8753_MOUTM1, 7, 1, 0), }; /* Mono 2 Mux */ static const struct snd_kcontrol_new wm8753_mono2_controls = SOC_DAPM_ENUM("Route", wm8753_enum[17]); /* Out 3 Mux */ static const struct snd_kcontrol_new wm8753_out3_controls = SOC_DAPM_ENUM("Route", wm8753_enum[18]); /* Out 4 Mux */ static const struct snd_kcontrol_new wm8753_out4_controls = SOC_DAPM_ENUM("Route", wm8753_enum[19]); /* ADC Mono Mix */ static const struct snd_kcontrol_new wm8753_adc_mono_controls = SOC_DAPM_ENUM("Route", wm8753_enum[22]); /* Record mixer */ static const struct snd_kcontrol_new wm8753_record_mixer_controls[] = { SOC_DAPM_SINGLE("Voice Capture Switch", WM8753_RECMIX2, 3, 1, 0), SOC_DAPM_SINGLE("Left Capture Switch", WM8753_RECMIX1, 3, 1, 0), SOC_DAPM_SINGLE("Right Capture Switch", WM8753_RECMIX1, 7, 1, 0), }; /* Left ADC mux */ static const struct snd_kcontrol_new wm8753_adc_left_controls = SOC_DAPM_ENUM("Route", wm8753_enum[21]); /* Right ADC mux */ static const struct snd_kcontrol_new wm8753_adc_right_controls = SOC_DAPM_ENUM("Route", wm8753_enum[20]); /* MIC mux */ static const struct snd_kcontrol_new wm8753_mic_mux_controls = SOC_DAPM_ENUM("Route", wm8753_enum[16]); /* ALC mixer */ static const struct snd_kcontrol_new wm8753_alc_mixer_controls[] = { SOC_DAPM_SINGLE("Line Capture Switch", WM8753_INCTL2, 3, 1, 0), SOC_DAPM_SINGLE("Mic2 Capture Switch", WM8753_INCTL2, 2, 1, 0), SOC_DAPM_SINGLE("Mic1 Capture Switch", WM8753_INCTL2, 1, 1, 0), SOC_DAPM_SINGLE("Rx Capture Switch", WM8753_INCTL2, 0, 1, 0), }; /* Left Line mux */ static const struct snd_kcontrol_new wm8753_line_left_controls = SOC_DAPM_ENUM("Route", wm8753_enum[14]); /* Right Line mux */ static const struct snd_kcontrol_new wm8753_line_right_controls = SOC_DAPM_ENUM("Route", wm8753_enum[13]); /* Mono Line mux */ static const struct snd_kcontrol_new wm8753_line_mono_controls = SOC_DAPM_ENUM("Route", wm8753_enum[12]); /* Line mux and mixer */ static const struct snd_kcontrol_new wm8753_line_mux_mix_controls = SOC_DAPM_ENUM("Route", wm8753_enum[11]); /* Rx mux and mixer */ static const struct snd_kcontrol_new wm8753_rx_mux_mix_controls = SOC_DAPM_ENUM("Route", wm8753_enum[15]); /* Mic Selector Mux */ static const struct snd_kcontrol_new wm8753_mic_sel_mux_controls = SOC_DAPM_ENUM("Route", wm8753_enum[25]); static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = { SND_SOC_DAPM_MICBIAS("Mic Bias", WM8753_PWR1, 5, 0), SND_SOC_DAPM_MIXER("Left Mixer", WM8753_PWR4, 0, 0, &wm8753_left_mixer_controls[0], ARRAY_SIZE(wm8753_left_mixer_controls)), SND_SOC_DAPM_PGA("Left Out 1", WM8753_PWR3, 8, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 2", WM8753_PWR3, 6, 0, NULL, 0), SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback", WM8753_PWR1, 3, 0), 
SND_SOC_DAPM_OUTPUT("LOUT1"), SND_SOC_DAPM_OUTPUT("LOUT2"), SND_SOC_DAPM_MIXER("Right Mixer", WM8753_PWR4, 1, 0, &wm8753_right_mixer_controls[0], ARRAY_SIZE(wm8753_right_mixer_controls)), SND_SOC_DAPM_PGA("Right Out 1", WM8753_PWR3, 7, 0, NULL, 0), SND_SOC_DAPM_PGA("Right Out 2", WM8753_PWR3, 5, 0, NULL, 0), SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback", WM8753_PWR1, 2, 0), SND_SOC_DAPM_OUTPUT("ROUT1"), SND_SOC_DAPM_OUTPUT("ROUT2"), SND_SOC_DAPM_MIXER("Mono Mixer", WM8753_PWR4, 2, 0, &wm8753_mono_mixer_controls[0], ARRAY_SIZE(wm8753_mono_mixer_controls)), SND_SOC_DAPM_PGA("Mono Out 1", WM8753_PWR3, 2, 0, NULL, 0), SND_SOC_DAPM_PGA("Mono Out 2", WM8753_PWR3, 1, 0, NULL, 0), SND_SOC_DAPM_DAC("Voice DAC", "Voice Playback", WM8753_PWR1, 4, 0), SND_SOC_DAPM_OUTPUT("MONO1"), SND_SOC_DAPM_MUX("Mono 2 Mux", SND_SOC_NOPM, 0, 0, &wm8753_mono2_controls), SND_SOC_DAPM_OUTPUT("MONO2"), SND_SOC_DAPM_MIXER("Out3 Left + Right", -1, 0, 0, NULL, 0), SND_SOC_DAPM_MUX("Out3 Mux", SND_SOC_NOPM, 0, 0, &wm8753_out3_controls), SND_SOC_DAPM_PGA("Out 3", WM8753_PWR3, 4, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("OUT3"), SND_SOC_DAPM_MUX("Out4 Mux", SND_SOC_NOPM, 0, 0, &wm8753_out4_controls), SND_SOC_DAPM_PGA("Out 4", WM8753_PWR3, 3, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("OUT4"), SND_SOC_DAPM_MIXER("Playback Mixer", WM8753_PWR4, 3, 0, &wm8753_record_mixer_controls[0], ARRAY_SIZE(wm8753_record_mixer_controls)), SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8753_PWR2, 3, 0), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8753_PWR2, 2, 0), SND_SOC_DAPM_MUX("Capture Left Mixer", SND_SOC_NOPM, 0, 0, &wm8753_adc_mono_controls), SND_SOC_DAPM_MUX("Capture Right Mixer", SND_SOC_NOPM, 0, 0, &wm8753_adc_mono_controls), SND_SOC_DAPM_MUX("Capture Left Mux", SND_SOC_NOPM, 0, 0, &wm8753_adc_left_controls), SND_SOC_DAPM_MUX("Capture Right Mux", SND_SOC_NOPM, 0, 0, &wm8753_adc_right_controls), SND_SOC_DAPM_MUX("Mic Sidetone Mux", SND_SOC_NOPM, 0, 0, &wm8753_mic_mux_controls), SND_SOC_DAPM_PGA("Left Capture Volume", WM8753_PWR2, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("Right Capture Volume", WM8753_PWR2, 4, 0, NULL, 0), SND_SOC_DAPM_MIXER("ALC Mixer", WM8753_PWR2, 6, 0, &wm8753_alc_mixer_controls[0], ARRAY_SIZE(wm8753_alc_mixer_controls)), SND_SOC_DAPM_MUX("Line Left Mux", SND_SOC_NOPM, 0, 0, &wm8753_line_left_controls), SND_SOC_DAPM_MUX("Line Right Mux", SND_SOC_NOPM, 0, 0, &wm8753_line_right_controls), SND_SOC_DAPM_MUX("Line Mono Mux", SND_SOC_NOPM, 0, 0, &wm8753_line_mono_controls), SND_SOC_DAPM_MUX("Line Mixer", WM8753_PWR2, 0, 0, &wm8753_line_mux_mix_controls), SND_SOC_DAPM_MUX("Rx Mixer", WM8753_PWR2, 1, 0, &wm8753_rx_mux_mix_controls), SND_SOC_DAPM_PGA("Mic 1 Volume", WM8753_PWR2, 8, 0, NULL, 0), SND_SOC_DAPM_PGA("Mic 2 Volume", WM8753_PWR2, 7, 0, NULL, 0), SND_SOC_DAPM_MUX("Mic Selection Mux", SND_SOC_NOPM, 0, 0, &wm8753_mic_sel_mux_controls), SND_SOC_DAPM_INPUT("LINE1"), SND_SOC_DAPM_INPUT("LINE2"), SND_SOC_DAPM_INPUT("RXP"), SND_SOC_DAPM_INPUT("RXN"), SND_SOC_DAPM_INPUT("ACIN"), SND_SOC_DAPM_OUTPUT("ACOP"), SND_SOC_DAPM_INPUT("MIC1N"), SND_SOC_DAPM_INPUT("MIC1"), SND_SOC_DAPM_INPUT("MIC2N"), SND_SOC_DAPM_INPUT("MIC2"), SND_SOC_DAPM_VMID("VREF"), }; static const struct snd_soc_dapm_route audio_map[] = { /* left mixer */ {"Left Mixer", "Left Playback Switch", "Left DAC"}, {"Left Mixer", "Voice Playback Switch", "Voice DAC"}, {"Left Mixer", "Sidetone Playback Switch", "Mic Sidetone Mux"}, {"Left Mixer", "Bypass Playback Switch", "Line Left Mux"}, /* right mixer */ {"Right Mixer", "Right Playback Switch", "Right DAC"}, {"Right Mixer", 
"Voice Playback Switch", "Voice DAC"}, {"Right Mixer", "Sidetone Playback Switch", "Mic Sidetone Mux"}, {"Right Mixer", "Bypass Playback Switch", "Line Right Mux"}, /* mono mixer */ {"Mono Mixer", "Voice Playback Switch", "Voice DAC"}, {"Mono Mixer", "Left Playback Switch", "Left DAC"}, {"Mono Mixer", "Right Playback Switch", "Right DAC"}, {"Mono Mixer", "Sidetone Playback Switch", "Mic Sidetone Mux"}, {"Mono Mixer", "Bypass Playback Switch", "Line Mono Mux"}, /* left out */ {"Left Out 1", NULL, "Left Mixer"}, {"Left Out 2", NULL, "Left Mixer"}, {"LOUT1", NULL, "Left Out 1"}, {"LOUT2", NULL, "Left Out 2"}, /* right out */ {"Right Out 1", NULL, "Right Mixer"}, {"Right Out 2", NULL, "Right Mixer"}, {"ROUT1", NULL, "Right Out 1"}, {"ROUT2", NULL, "Right Out 2"}, /* mono 1 out */ {"Mono Out 1", NULL, "Mono Mixer"}, {"MONO1", NULL, "Mono Out 1"}, /* mono 2 out */ {"Mono 2 Mux", "Left + Right", "Out3 Left + Right"}, {"Mono 2 Mux", "Inverted Mono 1", "MONO1"}, {"Mono 2 Mux", "Left", "Left Mixer"}, {"Mono 2 Mux", "Right", "Right Mixer"}, {"Mono Out 2", NULL, "Mono 2 Mux"}, {"MONO2", NULL, "Mono Out 2"}, /* out 3 */ {"Out3 Left + Right", NULL, "Left Mixer"}, {"Out3 Left + Right", NULL, "Right Mixer"}, {"Out3 Mux", "VREF", "VREF"}, {"Out3 Mux", "Left + Right", "Out3 Left + Right"}, {"Out3 Mux", "ROUT2", "ROUT2"}, {"Out 3", NULL, "Out3 Mux"}, {"OUT3", NULL, "Out 3"}, /* out 4 */ {"Out4 Mux", "VREF", "VREF"}, {"Out4 Mux", "Capture ST", "Playback Mixer"}, {"Out4 Mux", "LOUT2", "LOUT2"}, {"Out 4", NULL, "Out4 Mux"}, {"OUT4", NULL, "Out 4"}, /* record mixer */ {"Playback Mixer", "Left Capture Switch", "Left Mixer"}, {"Playback Mixer", "Voice Capture Switch", "Mono Mixer"}, {"Playback Mixer", "Right Capture Switch", "Right Mixer"}, /* Mic/SideTone Mux */ {"Mic Sidetone Mux", "Left PGA", "Left Capture Volume"}, {"Mic Sidetone Mux", "Right PGA", "Right Capture Volume"}, {"Mic Sidetone Mux", "Mic 1", "Mic 1 Volume"}, {"Mic Sidetone Mux", "Mic 2", "Mic 2 Volume"}, /* Capture Left Mux */ {"Capture Left Mux", "PGA", "Left Capture Volume"}, {"Capture Left Mux", "Line or RXP-RXN", "Line Left Mux"}, {"Capture Left Mux", "Line", "LINE1"}, /* Capture Right Mux */ {"Capture Right Mux", "PGA", "Right Capture Volume"}, {"Capture Right Mux", "Line or RXP-RXN", "Line Right Mux"}, {"Capture Right Mux", "Sidetone", "Playback Mixer"}, /* Mono Capture mixer-mux */ {"Capture Right Mixer", "Stereo", "Capture Right Mux"}, {"Capture Left Mixer", "Stereo", "Capture Left Mux"}, {"Capture Left Mixer", "Analogue Mix Left", "Capture Left Mux"}, {"Capture Left Mixer", "Analogue Mix Left", "Capture Right Mux"}, {"Capture Right Mixer", "Analogue Mix Right", "Capture Left Mux"}, {"Capture Right Mixer", "Analogue Mix Right", "Capture Right Mux"}, {"Capture Left Mixer", "Digital Mono Mix", "Capture Left Mux"}, {"Capture Left Mixer", "Digital Mono Mix", "Capture Right Mux"}, {"Capture Right Mixer", "Digital Mono Mix", "Capture Left Mux"}, {"Capture Right Mixer", "Digital Mono Mix", "Capture Right Mux"}, /* ADC */ {"Left ADC", NULL, "Capture Left Mixer"}, {"Right ADC", NULL, "Capture Right Mixer"}, /* Left Capture Volume */ {"Left Capture Volume", NULL, "ACIN"}, /* Right Capture Volume */ {"Right Capture Volume", NULL, "Mic 2 Volume"}, /* ALC Mixer */ {"ALC Mixer", "Line Capture Switch", "Line Mixer"}, {"ALC Mixer", "Mic2 Capture Switch", "Mic 2 Volume"}, {"ALC Mixer", "Mic1 Capture Switch", "Mic 1 Volume"}, {"ALC Mixer", "Rx Capture Switch", "Rx Mixer"}, /* Line Left Mux */ {"Line Left Mux", "Line 1", "LINE1"}, {"Line Left Mux", "Rx Mix", 
"Rx Mixer"}, /* Line Right Mux */ {"Line Right Mux", "Line 2", "LINE2"}, {"Line Right Mux", "Rx Mix", "Rx Mixer"}, /* Line Mono Mux */ {"Line Mono Mux", "Line Mix", "Line Mixer"}, {"Line Mono Mux", "Rx Mix", "Rx Mixer"}, /* Line Mixer/Mux */ {"Line Mixer", "Line 1 + 2", "LINE1"}, {"Line Mixer", "Line 1 - 2", "LINE1"}, {"Line Mixer", "Line 1 + 2", "LINE2"}, {"Line Mixer", "Line 1 - 2", "LINE2"}, {"Line Mixer", "Line 1", "LINE1"}, {"Line Mixer", "Line 2", "LINE2"}, /* Rx Mixer/Mux */ {"Rx Mixer", "RXP - RXN", "RXP"}, {"Rx Mixer", "RXP + RXN", "RXP"}, {"Rx Mixer", "RXP - RXN", "RXN"}, {"Rx Mixer", "RXP + RXN", "RXN"}, {"Rx Mixer", "RXP", "RXP"}, {"Rx Mixer", "RXN", "RXN"}, /* Mic 1 Volume */ {"Mic 1 Volume", NULL, "MIC1N"}, {"Mic 1 Volume", NULL, "Mic Selection Mux"}, /* Mic 2 Volume */ {"Mic 2 Volume", NULL, "MIC2N"}, {"Mic 2 Volume", NULL, "MIC2"}, /* Mic Selector Mux */ {"Mic Selection Mux", "Mic 1", "MIC1"}, {"Mic Selection Mux", "Mic 2", "MIC2N"}, {"Mic Selection Mux", "Mic 3", "MIC2"}, /* ACOP */ {"ACOP", NULL, "ALC Mixer"}, }; static int wm8753_add_widgets(struct snd_soc_codec *codec) { snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets, ARRAY_SIZE(wm8753_dapm_widgets)); snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); snd_soc_dapm_new_widgets(codec); return 0; } /* PLL divisors */ struct _pll_div { u32 div2:1; u32 n:4; u32 k:24; }; /* The size in bits of the pll divide multiplied by 10 * to allow rounding later */ #define FIXED_PLL_SIZE ((1 << 22) * 10) static void pll_factors(struct _pll_div *pll_div, unsigned int target, unsigned int source) { u64 Kpart; unsigned int K, Ndiv, Nmod; Ndiv = target / source; if (Ndiv < 6) { source >>= 1; pll_div->div2 = 1; Ndiv = target / source; } else pll_div->div2 = 0; if ((Ndiv < 6) || (Ndiv > 12)) printk(KERN_WARNING "wm8753: unsupported N = %u\n", Ndiv); pll_div->n = Ndiv; Nmod = target % source; Kpart = FIXED_PLL_SIZE * (long long)Nmod; do_div(Kpart, source); K = Kpart & 0xFFFFFFFF; /* Check if we need to round */ if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ K /= 10; pll_div->k = K; } static int wm8753_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, unsigned int freq_in, unsigned int freq_out) { u16 reg, enable; int offset; struct snd_soc_codec *codec = codec_dai->codec; if (pll_id < WM8753_PLL1 || pll_id > WM8753_PLL2) return -ENODEV; if (pll_id == WM8753_PLL1) { offset = 0; enable = 0x10; reg = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xffef; } else { offset = 4; enable = 0x8; reg = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfff7; } if (!freq_in || !freq_out) { /* disable PLL */ wm8753_write(codec, WM8753_PLL1CTL1 + offset, 0x0026); wm8753_write(codec, WM8753_CLOCK, reg); return 0; } else { u16 value = 0; struct _pll_div pll_div; pll_factors(&pll_div, freq_out * 8, freq_in); /* set up N and K PLL divisor ratios */ /* bits 8:5 = PLL_N, bits 3:0 = PLL_K[21:18] */ value = (pll_div.n << 5) + ((pll_div.k & 0x3c0000) >> 18); wm8753_write(codec, WM8753_PLL1CTL2 + offset, value); /* bits 8:0 = PLL_K[17:9] */ value = (pll_div.k & 0x03fe00) >> 9; wm8753_write(codec, WM8753_PLL1CTL3 + offset, value); /* bits 8:0 = PLL_K[8:0] */ value = pll_div.k & 0x0001ff; wm8753_write(codec, WM8753_PLL1CTL4 + offset, value); /* set PLL as input and enable */ wm8753_write(codec, WM8753_PLL1CTL1 + offset, 0x0027 | (pll_div.div2 << 3)); wm8753_write(codec, WM8753_CLOCK, reg | enable); } return 0; } struct _coeff_div { u32 mclk; u32 rate; u8 sr:5; u8 usb:1; }; /* codec hifi mclk (after PLL) clock divider 
coefficients */ static const struct _coeff_div coeff_div[] = { /* 8k */ {12288000, 8000, 0x6, 0x0}, {11289600, 8000, 0x16, 0x0}, {18432000, 8000, 0x7, 0x0}, {16934400, 8000, 0x17, 0x0}, {12000000, 8000, 0x6, 0x1}, /* 11.025k */ {11289600, 11025, 0x18, 0x0}, {16934400, 11025, 0x19, 0x0}, {12000000, 11025, 0x19, 0x1}, /* 16k */ {12288000, 16000, 0xa, 0x0}, {18432000, 16000, 0xb, 0x0}, {12000000, 16000, 0xa, 0x1}, /* 22.05k */ {11289600, 22050, 0x1a, 0x0}, {16934400, 22050, 0x1b, 0x0}, {12000000, 22050, 0x1b, 0x1}, /* 32k */ {12288000, 32000, 0xc, 0x0}, {18432000, 32000, 0xd, 0x0}, {12000000, 32000, 0xa, 0x1}, /* 44.1k */ {11289600, 44100, 0x10, 0x0}, {16934400, 44100, 0x11, 0x0}, {12000000, 44100, 0x11, 0x1}, /* 48k */ {12288000, 48000, 0x0, 0x0}, {18432000, 48000, 0x1, 0x0}, {12000000, 48000, 0x0, 0x1}, /* 88.2k */ {11289600, 88200, 0x1e, 0x0}, {16934400, 88200, 0x1f, 0x0}, {12000000, 88200, 0x1f, 0x1}, /* 96k */ {12288000, 96000, 0xe, 0x0}, {18432000, 96000, 0xf, 0x0}, {12000000, 96000, 0xe, 0x1}, }; static int get_coeff(int mclk, int rate) { int i; for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk) return i; } return -EINVAL; } /* * Clock after PLL and dividers */ static int wm8753_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8753_priv *wm8753 = codec->private_data; switch (freq) { case 11289600: case 12000000: case 12288000: case 16934400: case 18432000: if (clk_id == WM8753_MCLK) { wm8753->sysclk = freq; return 0; } else if (clk_id == WM8753_PCMCLK) { wm8753->pcmclk = freq; return 0; } break; } return -EINVAL; } /* * Set's ADC and Voice DAC format. */ static int wm8753_vdac_adc_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x01ec; /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: voice |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: voice |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: voice |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: voice |= 0x0013; break; default: return -EINVAL; } wm8753_write(codec, WM8753_PCM, voice); return 0; } /* * Set PCM DAI bit size and sample rate. */ static int wm8753_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_device *socdev = rtd->socdev; struct snd_soc_codec *codec = socdev->card->codec; struct wm8753_priv *wm8753 = codec->private_data; u16 voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x01f3; u16 srate = wm8753_read_reg_cache(codec, WM8753_SRATE1) & 0x017f; /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: voice |= 0x0004; break; case SNDRV_PCM_FORMAT_S24_LE: voice |= 0x0008; break; case SNDRV_PCM_FORMAT_S32_LE: voice |= 0x000c; break; } /* sample rate */ if (params_rate(params) * 384 == wm8753->pcmclk) srate |= 0x80; wm8753_write(codec, WM8753_SRATE1, srate); wm8753_write(codec, WM8753_PCM, voice); return 0; } /* * Set's PCM dai fmt and BCLK. 
*/ static int wm8753_pcm_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 voice, ioctl; voice = wm8753_read_reg_cache(codec, WM8753_PCM) & 0x011f; ioctl = wm8753_read_reg_cache(codec, WM8753_IOCTL) & 0x015d; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: break; case SND_SOC_DAIFMT_CBM_CFM: ioctl |= 0x2; case SND_SOC_DAIFMT_CBM_CFS: voice |= 0x0040; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: case SND_SOC_DAIFMT_DSP_B: /* frame inversion not valid for DSP modes */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_NF: voice |= 0x0080; break; default: return -EINVAL; } break; case SND_SOC_DAIFMT_I2S: case SND_SOC_DAIFMT_RIGHT_J: case SND_SOC_DAIFMT_LEFT_J: voice &= ~0x0010; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: voice |= 0x0090; break; case SND_SOC_DAIFMT_IB_NF: voice |= 0x0080; break; case SND_SOC_DAIFMT_NB_IF: voice |= 0x0010; break; default: return -EINVAL; } break; default: return -EINVAL; } wm8753_write(codec, WM8753_PCM, voice); wm8753_write(codec, WM8753_IOCTL, ioctl); return 0; } static int wm8753_set_dai_clkdiv(struct snd_soc_dai *codec_dai, int div_id, int div) { struct snd_soc_codec *codec = codec_dai->codec; u16 reg; switch (div_id) { case WM8753_PCMDIV: reg = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0x003f; wm8753_write(codec, WM8753_CLOCK, reg | div); break; case WM8753_BCLKDIV: reg = wm8753_read_reg_cache(codec, WM8753_SRATE2) & 0x01c7; wm8753_write(codec, WM8753_SRATE2, reg | div); break; case WM8753_VXCLKDIV: reg = wm8753_read_reg_cache(codec, WM8753_SRATE2) & 0x003f; wm8753_write(codec, WM8753_SRATE2, reg | div); break; default: return -EINVAL; } return 0; } /* * Set's HiFi DAC format. */ static int wm8753_hdac_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x01e0; /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: hifi |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: hifi |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: hifi |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: hifi |= 0x0013; break; default: return -EINVAL; } wm8753_write(codec, WM8753_HIFI, hifi); return 0; } /* * Set's I2S DAI format. 
*/ static int wm8753_i2s_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 ioctl, hifi; hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x011f; ioctl = wm8753_read_reg_cache(codec, WM8753_IOCTL) & 0x00ae; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: break; case SND_SOC_DAIFMT_CBM_CFM: ioctl |= 0x1; case SND_SOC_DAIFMT_CBM_CFS: hifi |= 0x0040; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: case SND_SOC_DAIFMT_DSP_B: /* frame inversion not valid for DSP modes */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_NF: hifi |= 0x0080; break; default: return -EINVAL; } break; case SND_SOC_DAIFMT_I2S: case SND_SOC_DAIFMT_RIGHT_J: case SND_SOC_DAIFMT_LEFT_J: hifi &= ~0x0010; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: hifi |= 0x0090; break; case SND_SOC_DAIFMT_IB_NF: hifi |= 0x0080; break; case SND_SOC_DAIFMT_NB_IF: hifi |= 0x0010; break; default: return -EINVAL; } break; default: return -EINVAL; } wm8753_write(codec, WM8753_HIFI, hifi); wm8753_write(codec, WM8753_IOCTL, ioctl); return 0; } /* * Set PCM DAI bit size and sample rate. */ static int wm8753_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_device *socdev = rtd->socdev; struct snd_soc_codec *codec = socdev->card->codec; struct wm8753_priv *wm8753 = codec->private_data; u16 srate = wm8753_read_reg_cache(codec, WM8753_SRATE1) & 0x01c0; u16 hifi = wm8753_read_reg_cache(codec, WM8753_HIFI) & 0x01f3; int coeff; /* is digital filter coefficient valid ? 
*/ coeff = get_coeff(wm8753->sysclk, params_rate(params)); if (coeff < 0) { printk(KERN_ERR "wm8753 invalid MCLK or rate\n"); return coeff; } wm8753_write(codec, WM8753_SRATE1, srate | (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: hifi |= 0x0004; break; case SNDRV_PCM_FORMAT_S24_LE: hifi |= 0x0008; break; case SNDRV_PCM_FORMAT_S32_LE: hifi |= 0x000c; break; } wm8753_write(codec, WM8753_HIFI, hifi); return 0; } static int wm8753_mode1v_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 clock; /* set clk source as pcmclk */ clock = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfffb; wm8753_write(codec, WM8753_CLOCK, clock); if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; return wm8753_pcm_set_dai_fmt(codec_dai, fmt); } static int wm8753_mode1h_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { if (wm8753_hdac_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; return wm8753_i2s_set_dai_fmt(codec_dai, fmt); } static int wm8753_mode2_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 clock; /* set clk source as pcmclk */ clock = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfffb; wm8753_write(codec, WM8753_CLOCK, clock); if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; return wm8753_i2s_set_dai_fmt(codec_dai, fmt); } static int wm8753_mode3_4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 clock; /* set clk source as mclk */ clock = wm8753_read_reg_cache(codec, WM8753_CLOCK) & 0xfffb; wm8753_write(codec, WM8753_CLOCK, clock | 0x4); if (wm8753_hdac_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; if (wm8753_vdac_adc_set_dai_fmt(codec_dai, fmt) < 0) return -EINVAL; return wm8753_i2s_set_dai_fmt(codec_dai, fmt); } static int wm8753_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = wm8753_read_reg_cache(codec, WM8753_DAC) & 0xfff7; /* the digital mute covers the HiFi and Voice DAC's on the WM8753. 
* make sure we check if they are not both active when we mute */ if (mute && dai->id == 1) { if (!wm8753_dai[WM8753_DAI_VOICE].playback.active || !wm8753_dai[WM8753_DAI_HIFI].playback.active) wm8753_write(codec, WM8753_DAC, mute_reg | 0x8); } else { if (mute) wm8753_write(codec, WM8753_DAC, mute_reg | 0x8); else wm8753_write(codec, WM8753_DAC, mute_reg); } return 0; } static int wm8753_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { u16 pwr_reg = wm8753_read_reg_cache(codec, WM8753_PWR1) & 0xfe3e; switch (level) { case SND_SOC_BIAS_ON: /* set vmid to 50k and unmute dac */ wm8753_write(codec, WM8753_PWR1, pwr_reg | 0x00c0); break; case SND_SOC_BIAS_PREPARE: /* set vmid to 5k for quick power up */ wm8753_write(codec, WM8753_PWR1, pwr_reg | 0x01c1); break; case SND_SOC_BIAS_STANDBY: /* mute dac and set vmid to 500k, enable VREF */ wm8753_write(codec, WM8753_PWR1, pwr_reg | 0x0141); break; case SND_SOC_BIAS_OFF: wm8753_write(codec, WM8753_PWR1, 0x0001); break; } codec->bias_level = level; return 0; } #define WM8753_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) #define WM8753_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) /* * The WM8753 supports upto 4 different and mutually exclusive DAI * configurations. This gives 2 PCM's available for use, hifi and voice. * NOTE: The Voice PCM cannot play or capture audio to the CPU as it's DAI * is connected between the wm8753 and a BT codec or GSM modem. * * 1. Voice over PCM DAI - HIFI DAC over HIFI DAI * 2. Voice over HIFI DAI - HIFI disabled * 3. Voice disabled - HIFI over HIFI * 4. Voice disabled - HIFI over HIFI, uses voice DAI LRC for capture */ static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode1 = { .hw_params = wm8753_i2s_hw_params, .digital_mute = wm8753_mute, .set_fmt = wm8753_mode1h_set_dai_fmt, .set_clkdiv = wm8753_set_dai_clkdiv, .set_pll = wm8753_set_dai_pll, .set_sysclk = wm8753_set_dai_sysclk, }; static struct snd_soc_dai_ops wm8753_dai_ops_voice_mode1 = { .hw_params = wm8753_pcm_hw_params, .digital_mute = wm8753_mute, .set_fmt = wm8753_mode1v_set_dai_fmt, .set_clkdiv = wm8753_set_dai_clkdiv, .set_pll = wm8753_set_dai_pll, .set_sysclk = wm8753_set_dai_sysclk, }; static struct snd_soc_dai_ops wm8753_dai_ops_voice_mode2 = { .hw_params = wm8753_pcm_hw_params, .digital_mute = wm8753_mute, .set_fmt = wm8753_mode2_set_dai_fmt, .set_clkdiv = wm8753_set_dai_clkdiv, .set_pll = wm8753_set_dai_pll, .set_sysclk = wm8753_set_dai_sysclk, }; static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode3 = { .hw_params = wm8753_i2s_hw_params, .digital_mute = wm8753_mute, .set_fmt = wm8753_mode3_4_set_dai_fmt, .set_clkdiv = wm8753_set_dai_clkdiv, .set_pll = wm8753_set_dai_pll, .set_sysclk = wm8753_set_dai_sysclk, }; static struct snd_soc_dai_ops wm8753_dai_ops_hifi_mode4 = { .hw_params = wm8753_i2s_hw_params, .digital_mute = wm8753_mute, .set_fmt = wm8753_mode3_4_set_dai_fmt, .set_clkdiv = wm8753_set_dai_clkdiv, .set_pll = wm8753_set_dai_pll, .set_sysclk = wm8753_set_dai_sysclk, }; static const struct snd_soc_dai wm8753_all_dai[] = { /* DAI HiFi mode 1 */ { .name = "WM8753 HiFi", .id = 1, .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = WM8753_RATES, .formats = WM8753_FORMATS}, .capture = { /* dummy for fast DAI switching */ .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = 
WM8753_RATES, .formats = WM8753_FORMATS}, .ops = &wm8753_dai_ops_hifi_mode1, }, /* DAI Voice mode 1 */ { .name = "WM8753 Voice", .id = 1, .playback = { .stream_name = "Voice Playback", .channels_min = 1, .channels_max = 1, .rates = WM8753_RATES, .formats = WM8753_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8753_RATES, .formats = WM8753_FORMATS,}, .ops = &wm8753_dai_ops_voice_mode1, }, /* DAI HiFi mode 2 - dummy */ { .name = "WM8753 HiFi", .id = 2, }, /* DAI Voice mode 2 */ { .name = "WM8753 Voice", .id = 2, .playback = { .stream_name = "Voice Playback", .channels_min = 1, .channels_max = 1, .rates = WM8753_RATES, .formats = WM8753_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8753_RATES, .formats = WM8753_FORMATS,}, .ops = &wm8753_dai_ops_voice_mode2, }, /* DAI HiFi mode 3 */ { .name = "WM8753 HiFi", .id = 3, .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = WM8753_RATES, .formats = WM8753_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8753_RATES, .formats = WM8753_FORMATS,}, .ops = &wm8753_dai_ops_hifi_mode3, }, /* DAI Voice mode 3 - dummy */ { .name = "WM8753 Voice", .id = 3, }, /* DAI HiFi mode 4 */ { .name = "WM8753 HiFi", .id = 4, .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = WM8753_RATES, .formats = WM8753_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8753_RATES, .formats = WM8753_FORMATS,}, .ops = &wm8753_dai_ops_hifi_mode4, }, /* DAI Voice mode 4 - dummy */ { .name = "WM8753 Voice", .id = 4, }, }; struct snd_soc_dai wm8753_dai[] = { { .name = "WM8753 DAI 0", }, { .name = "WM8753 DAI 1", }, }; EXPORT_SYMBOL_GPL(wm8753_dai); static void wm8753_set_dai_mode(struct snd_soc_codec *codec, unsigned int mode) { if (mode < 4) { int playback_active, capture_active, codec_active, pop_wait; void *private_data; struct list_head list; playback_active = wm8753_dai[0].playback.active; capture_active = wm8753_dai[0].capture.active; codec_active = wm8753_dai[0].active; private_data = wm8753_dai[0].private_data; pop_wait = wm8753_dai[0].pop_wait; list = wm8753_dai[0].list; wm8753_dai[0] = wm8753_all_dai[mode << 1]; wm8753_dai[0].playback.active = playback_active; wm8753_dai[0].capture.active = capture_active; wm8753_dai[0].active = codec_active; wm8753_dai[0].private_data = private_data; wm8753_dai[0].pop_wait = pop_wait; wm8753_dai[0].list = list; playback_active = wm8753_dai[1].playback.active; capture_active = wm8753_dai[1].capture.active; codec_active = wm8753_dai[1].active; private_data = wm8753_dai[1].private_data; pop_wait = wm8753_dai[1].pop_wait; list = wm8753_dai[1].list; wm8753_dai[1] = wm8753_all_dai[(mode << 1) + 1]; wm8753_dai[1].playback.active = playback_active; wm8753_dai[1].capture.active = capture_active; wm8753_dai[1].active = codec_active; wm8753_dai[1].private_data = private_data; wm8753_dai[1].pop_wait = pop_wait; wm8753_dai[1].list = list; } wm8753_dai[0].codec = codec; wm8753_dai[1].codec = codec; } static void wm8753_work(struct work_struct *work) { struct snd_soc_codec *codec = container_of(work, struct snd_soc_codec, delayed_work.work); wm8753_set_bias_level(codec, codec->bias_level); } static int wm8753_suspend(struct platform_device *pdev, pm_message_t state) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec = socdev->card->codec; /* 
we only need to suspend if we are a valid card */ if (!codec->card) return 0; wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8753_resume(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec = socdev->card->codec; int i; u8 data[2]; u16 *cache = codec->reg_cache; /* we only need to resume if we are a valid card */ if (!codec->card) return 0; /* Sync reg_cache with the hardware */ for (i = 0; i < ARRAY_SIZE(wm8753_reg); i++) { if (i + 1 == WM8753_RESET) continue; /* No point in writing hardware default values back */ if (cache[i] == wm8753_reg[i]) continue; data[0] = ((i + 1) << 1) | ((cache[i] >> 8) & 0x0001); data[1] = cache[i] & 0x00ff; codec->hw_write(codec->control_data, data, 2); } wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* charge wm8753 caps */ if (codec->suspend_bias_level == SND_SOC_BIAS_ON) { wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE); codec->bias_level = SND_SOC_BIAS_ON; schedule_delayed_work(&codec->delayed_work, msecs_to_jiffies(caps_charge)); } return 0; } static struct snd_soc_codec *wm8753_codec; static int wm8753_probe(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); struct snd_soc_codec *codec; int ret = 0; if (!wm8753_codec) { dev_err(&pdev->dev, "WM8753 codec not yet registered\n"); return -EINVAL; } socdev->card->codec = wm8753_codec; codec = wm8753_codec; wm8753_set_dai_mode(codec, 0); /* register pcms */ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); if (ret < 0) { printk(KERN_ERR "wm8753: failed to create pcms\n"); goto pcm_err; } snd_soc_add_controls(codec, wm8753_snd_controls, ARRAY_SIZE(wm8753_snd_controls)); wm8753_add_widgets(codec); ret = snd_soc_init_card(socdev); if (ret < 0) { printk(KERN_ERR "wm8753: failed to register card\n"); goto card_err; } return 0; card_err: snd_soc_free_pcms(socdev); snd_soc_dapm_free(socdev); pcm_err: return ret; } /* * This function forces any delayed work to be queued and run. */ static int run_delayed_work(struct delayed_work *dwork) { int ret; /* cancel any work waiting to be queued. 
*/ ret = cancel_delayed_work(dwork); /* if there was any work waiting then we run it now and * wait for it's completion */ if (ret) { schedule_delayed_work(dwork, 0); flush_scheduled_work(); } return ret; } /* power down chip */ static int wm8753_remove(struct platform_device *pdev) { struct snd_soc_device *socdev = platform_get_drvdata(pdev); snd_soc_free_pcms(socdev); snd_soc_dapm_free(socdev); return 0; } struct snd_soc_codec_device soc_codec_dev_wm8753 = { .probe = wm8753_probe, .remove = wm8753_remove, .suspend = wm8753_suspend, .resume = wm8753_resume, }; EXPORT_SYMBOL_GPL(soc_codec_dev_wm8753); static int wm8753_register(struct wm8753_priv *wm8753) { int ret, i; struct snd_soc_codec *codec = &wm8753->codec; u16 reg; if (wm8753_codec) { dev_err(codec->dev, "Multiple WM8753 devices not supported\n"); ret = -EINVAL; goto err; } mutex_init(&codec->mutex); INIT_LIST_HEAD(&codec->dapm_widgets); INIT_LIST_HEAD(&codec->dapm_paths); codec->name = "WM8753"; codec->owner = THIS_MODULE; codec->read = wm8753_read_reg_cache; codec->write = wm8753_write; codec->bias_level = SND_SOC_BIAS_STANDBY; codec->set_bias_level = wm8753_set_bias_level; codec->dai = wm8753_dai; codec->num_dai = 2; codec->reg_cache_size = ARRAY_SIZE(wm8753->reg_cache) + 1; codec->reg_cache = &wm8753->reg_cache; codec->private_data = wm8753; memcpy(codec->reg_cache, wm8753_reg, sizeof(wm8753->reg_cache)); INIT_DELAYED_WORK(&codec->delayed_work, wm8753_work); ret = wm8753_reset(codec); if (ret < 0) { dev_err(codec->dev, "Failed to issue reset\n"); goto err; } /* charge output caps */ wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE); schedule_delayed_work(&codec->delayed_work, msecs_to_jiffies(caps_charge)); /* set the update bits */ reg = wm8753_read_reg_cache(codec, WM8753_LDAC); wm8753_write(codec, WM8753_LDAC, reg | 0x0100); reg = wm8753_read_reg_cache(codec, WM8753_RDAC); wm8753_write(codec, WM8753_RDAC, reg | 0x0100); reg = wm8753_read_reg_cache(codec, WM8753_LADC); wm8753_write(codec, WM8753_LADC, reg | 0x0100); reg = wm8753_read_reg_cache(codec, WM8753_RADC); wm8753_write(codec, WM8753_RADC, reg | 0x0100); reg = wm8753_read_reg_cache(codec, WM8753_LOUT1V); wm8753_write(codec, WM8753_LOUT1V, reg | 0x0100); reg = wm8753_read_reg_cache(codec, WM8753_ROUT1V); wm8753_write(codec, WM8753_ROUT1V, reg | 0x0100); reg = wm8753_read_reg_cache(codec, WM8753_LOUT2V); wm8753_write(codec, WM8753_LOUT2V, reg | 0x0100); reg = wm8753_read_reg_cache(codec, WM8753_ROUT2V); wm8753_write(codec, WM8753_ROUT2V, reg | 0x0100); reg = wm8753_read_reg_cache(codec, WM8753_LINVOL); wm8753_write(codec, WM8753_LINVOL, reg | 0x0100); reg = wm8753_read_reg_cache(codec, WM8753_RINVOL); wm8753_write(codec, WM8753_RINVOL, reg | 0x0100); wm8753_codec = codec; for (i = 0; i < ARRAY_SIZE(wm8753_dai); i++) wm8753_dai[i].dev = codec->dev; ret = snd_soc_register_codec(codec); if (ret != 0) { dev_err(codec->dev, "Failed to register codec: %d\n", ret); goto err; } ret = snd_soc_register_dais(&wm8753_dai[0], ARRAY_SIZE(wm8753_dai)); if (ret != 0) { dev_err(codec->dev, "Failed to register DAIs: %d\n", ret); goto err_codec; } return 0; err_codec: run_delayed_work(&codec->delayed_work); snd_soc_unregister_codec(codec); err: kfree(wm8753); return ret; } static void wm8753_unregister(struct wm8753_priv *wm8753) { wm8753_set_bias_level(&wm8753->codec, SND_SOC_BIAS_OFF); run_delayed_work(&wm8753->codec.delayed_work); snd_soc_unregister_dais(&wm8753_dai[0], ARRAY_SIZE(wm8753_dai)); snd_soc_unregister_codec(&wm8753->codec); kfree(wm8753); wm8753_codec = NULL; } #if 
defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static int wm8753_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct snd_soc_codec *codec; struct wm8753_priv *wm8753; wm8753 = kzalloc(sizeof(struct wm8753_priv), GFP_KERNEL); if (wm8753 == NULL) return -ENOMEM; codec = &wm8753->codec; codec->hw_write = (hw_write_t)i2c_master_send; codec->control_data = i2c; i2c_set_clientdata(i2c, wm8753); codec->dev = &i2c->dev; return wm8753_register(wm8753); } static int wm8753_i2c_remove(struct i2c_client *client) { struct wm8753_priv *wm8753 = i2c_get_clientdata(client); wm8753_unregister(wm8753); return 0; } #ifdef CONFIG_PM static int wm8753_i2c_suspend(struct i2c_client *client, pm_message_t msg) { return snd_soc_suspend_device(&client->dev); } static int wm8753_i2c_resume(struct i2c_client *client) { return snd_soc_resume_device(&client->dev); } #else #define wm8753_i2c_suspend NULL #define wm8753_i2c_resume NULL #endif static const struct i2c_device_id wm8753_i2c_id[] = { { "wm8753", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8753_i2c_id); static struct i2c_driver wm8753_i2c_driver = { .driver = { .name = "wm8753", .owner = THIS_MODULE, }, .probe = wm8753_i2c_probe, .remove = wm8753_i2c_remove, .suspend = wm8753_i2c_suspend, .resume = wm8753_i2c_resume, .id_table = wm8753_i2c_id, }; #endif #if defined(CONFIG_SPI_MASTER) static int wm8753_spi_write(struct spi_device *spi, const char *data, int len) { struct spi_transfer t; struct spi_message m; u8 msg[2]; if (len <= 0) return 0; msg[0] = data[0]; msg[1] = data[1]; spi_message_init(&m); memset(&t, 0, (sizeof t)); t.tx_buf = &msg[0]; t.len = len; spi_message_add_tail(&t, &m); spi_sync(spi, &m); return len; } static int __devinit wm8753_spi_probe(struct spi_device *spi) { struct snd_soc_codec *codec; struct wm8753_priv *wm8753; wm8753 = kzalloc(sizeof(struct wm8753_priv), GFP_KERNEL); if (wm8753 == NULL) return -ENOMEM; codec = &wm8753->codec; codec->control_data = spi; codec->hw_write = (hw_write_t)wm8753_spi_write; codec->dev = &spi->dev; dev_set_drvdata(&spi->dev, wm8753); return wm8753_register(wm8753); } static int __devexit wm8753_spi_remove(struct spi_device *spi) { struct wm8753_priv *wm8753 = dev_get_drvdata(&spi->dev); wm8753_unregister(wm8753); return 0; } #ifdef CONFIG_PM static int wm8753_spi_suspend(struct spi_device *spi, pm_message_t msg) { return snd_soc_suspend_device(&spi->dev); } static int wm8753_spi_resume(struct spi_device *spi) { return snd_soc_resume_device(&spi->dev); } #else #define wm8753_spi_suspend NULL #define wm8753_spi_resume NULL #endif static struct spi_driver wm8753_spi_driver = { .driver = { .name = "wm8753", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = wm8753_spi_probe, .remove = __devexit_p(wm8753_spi_remove), .suspend = wm8753_spi_suspend, .resume = wm8753_spi_resume, }; #endif static int __init wm8753_modinit(void) { int ret; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&wm8753_i2c_driver); if (ret != 0) pr_err("Failed to register WM8753 I2C driver: %d\n", ret); #endif #if defined(CONFIG_SPI_MASTER) ret = spi_register_driver(&wm8753_spi_driver); if (ret != 0) pr_err("Failed to register WM8753 SPI driver: %d\n", ret); #endif return 0; } module_init(wm8753_modinit); static void __exit wm8753_exit(void) { #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_driver(&wm8753_i2c_driver); #endif #if defined(CONFIG_SPI_MASTER) spi_unregister_driver(&wm8753_spi_driver); #endif } module_exit(wm8753_exit); MODULE_DESCRIPTION("ASoC WM8753 driver"); 
MODULE_AUTHOR("Liam Girdwood"); MODULE_LICENSE("GPL");
gpl-2.0
jiankangshiye/linux-2.6.32.63-mini2440
arch/mn10300/kernel/mn10300-serial.c
507
36695
/* MN10300 On-chip serial port UART driver * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ static const char serial_name[] = "MN10300 Serial driver"; static const char serial_version[] = "mn10300_serial-1.0"; static const char serial_revdate[] = "2007-11-06"; #if defined(CONFIG_MN10300_TTYSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/serial.h> #include <linux/circ_buf.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/bitops.h> #include <asm/serial-regs.h> #include <unit/timex.h> #include "mn10300-serial.h" static inline __attribute__((format(printf, 1, 2))) void no_printk(const char *fmt, ...) { } #define kenter(FMT, ...) \ printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__) #define _enter(FMT, ...) \ no_printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__) #define kdebug(FMT, ...) \ printk(KERN_DEBUG "--- " FMT "\n", ##__VA_ARGS__) #define _debug(FMT, ...) \ no_printk(KERN_DEBUG "--- " FMT "\n", ##__VA_ARGS__) #define kproto(FMT, ...) \ printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__) #define _proto(FMT, ...) 
\ no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__) #define NR_UARTS 3 #ifdef CONFIG_MN10300_TTYSM_CONSOLE static void mn10300_serial_console_write(struct console *co, const char *s, unsigned count); static int __init mn10300_serial_console_setup(struct console *co, char *options); static struct uart_driver mn10300_serial_driver; static struct console mn10300_serial_console = { .name = "ttySM", .write = mn10300_serial_console_write, .device = uart_console_device, .setup = mn10300_serial_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &mn10300_serial_driver, }; #endif static struct uart_driver mn10300_serial_driver = { .owner = NULL, .driver_name = "mn10300-serial", .dev_name = "ttySM", .major = TTY_MAJOR, .minor = 128, .nr = NR_UARTS, #ifdef CONFIG_MN10300_TTYSM_CONSOLE .cons = &mn10300_serial_console, #endif }; static unsigned int mn10300_serial_tx_empty(struct uart_port *); static void mn10300_serial_set_mctrl(struct uart_port *, unsigned int mctrl); static unsigned int mn10300_serial_get_mctrl(struct uart_port *); static void mn10300_serial_stop_tx(struct uart_port *); static void mn10300_serial_start_tx(struct uart_port *); static void mn10300_serial_send_xchar(struct uart_port *, char ch); static void mn10300_serial_stop_rx(struct uart_port *); static void mn10300_serial_enable_ms(struct uart_port *); static void mn10300_serial_break_ctl(struct uart_port *, int ctl); static int mn10300_serial_startup(struct uart_port *); static void mn10300_serial_shutdown(struct uart_port *); static void mn10300_serial_set_termios(struct uart_port *, struct ktermios *new, struct ktermios *old); static const char *mn10300_serial_type(struct uart_port *); static void mn10300_serial_release_port(struct uart_port *); static int mn10300_serial_request_port(struct uart_port *); static void mn10300_serial_config_port(struct uart_port *, int); static int mn10300_serial_verify_port(struct uart_port *, struct serial_struct *); static const struct uart_ops mn10300_serial_ops = { .tx_empty = mn10300_serial_tx_empty, .set_mctrl = mn10300_serial_set_mctrl, .get_mctrl = mn10300_serial_get_mctrl, .stop_tx = mn10300_serial_stop_tx, .start_tx = mn10300_serial_start_tx, .send_xchar = mn10300_serial_send_xchar, .stop_rx = mn10300_serial_stop_rx, .enable_ms = mn10300_serial_enable_ms, .break_ctl = mn10300_serial_break_ctl, .startup = mn10300_serial_startup, .shutdown = mn10300_serial_shutdown, .set_termios = mn10300_serial_set_termios, .type = mn10300_serial_type, .release_port = mn10300_serial_release_port, .request_port = mn10300_serial_request_port, .config_port = mn10300_serial_config_port, .verify_port = mn10300_serial_verify_port, }; static irqreturn_t mn10300_serial_interrupt(int irq, void *dev_id); /* * the first on-chip serial port: ttySM0 (aka SIF0) */ #ifdef CONFIG_MN10300_TTYSM0 struct mn10300_serial_port mn10300_serial_port_sif0 = { .uart.ops = &mn10300_serial_ops, .uart.membase = (void __iomem *) &SC0CTR, .uart.mapbase = (unsigned long) &SC0CTR, .uart.iotype = UPIO_MEM, .uart.irq = 0, .uart.uartclk = 0, /* MN10300_IOCLK, */ .uart.fifosize = 1, .uart.flags = UPF_BOOT_AUTOCONF, .uart.line = 0, .uart.type = PORT_MN10300, .uart.lock = __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif0.uart.lock), .name = "ttySM0", ._iobase = &SC0CTR, ._control = &SC0CTR, ._status = (volatile u8 *) &SC0STR, ._intr = &SC0ICR, ._rxb = &SC0RXB, ._txb = &SC0TXB, .rx_name = "ttySM0/Rx", .tx_name = "ttySM0/Tx", #ifdef CONFIG_MN10300_TTYSM0_TIMER8 .tm_name = "ttySM0/Timer8", ._tmxmd = &TM8MD, ._tmxbr = &TM8BR, 
._tmicr = &TM8ICR, .tm_irq = TM8IRQ, .div_timer = MNSCx_DIV_TIMER_16BIT, #else /* CONFIG_MN10300_TTYSM0_TIMER2 */ .tm_name = "ttySM0/Timer2", ._tmxmd = &TM2MD, ._tmxbr = (volatile u16 *) &TM2BR, ._tmicr = &TM2ICR, .tm_irq = TM2IRQ, .div_timer = MNSCx_DIV_TIMER_8BIT, #endif .rx_irq = SC0RXIRQ, .tx_irq = SC0TXIRQ, .rx_icr = &GxICR(SC0RXIRQ), .tx_icr = &GxICR(SC0TXIRQ), .clock_src = MNSCx_CLOCK_SRC_IOCLK, .options = 0, #ifdef CONFIG_GDBSTUB_ON_TTYSM0 .gdbstub = 1, #endif }; #endif /* CONFIG_MN10300_TTYSM0 */ /* * the second on-chip serial port: ttySM1 (aka SIF1) */ #ifdef CONFIG_MN10300_TTYSM1 struct mn10300_serial_port mn10300_serial_port_sif1 = { .uart.ops = &mn10300_serial_ops, .uart.membase = (void __iomem *) &SC1CTR, .uart.mapbase = (unsigned long) &SC1CTR, .uart.iotype = UPIO_MEM, .uart.irq = 0, .uart.uartclk = 0, /* MN10300_IOCLK, */ .uart.fifosize = 1, .uart.flags = UPF_BOOT_AUTOCONF, .uart.line = 1, .uart.type = PORT_MN10300, .uart.lock = __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif1.uart.lock), .name = "ttySM1", ._iobase = &SC1CTR, ._control = &SC1CTR, ._status = (volatile u8 *) &SC1STR, ._intr = &SC1ICR, ._rxb = &SC1RXB, ._txb = &SC1TXB, .rx_name = "ttySM1/Rx", .tx_name = "ttySM1/Tx", #ifdef CONFIG_MN10300_TTYSM1_TIMER9 .tm_name = "ttySM1/Timer9", ._tmxmd = &TM9MD, ._tmxbr = &TM9BR, ._tmicr = &TM9ICR, .tm_irq = TM9IRQ, .div_timer = MNSCx_DIV_TIMER_16BIT, #else /* CONFIG_MN10300_TTYSM1_TIMER3 */ .tm_name = "ttySM1/Timer3", ._tmxmd = &TM3MD, ._tmxbr = (volatile u16 *) &TM3BR, ._tmicr = &TM3ICR, .tm_irq = TM3IRQ, .div_timer = MNSCx_DIV_TIMER_8BIT, #endif .rx_irq = SC1RXIRQ, .tx_irq = SC1TXIRQ, .rx_icr = &GxICR(SC1RXIRQ), .tx_icr = &GxICR(SC1TXIRQ), .clock_src = MNSCx_CLOCK_SRC_IOCLK, .options = 0, #ifdef CONFIG_GDBSTUB_ON_TTYSM1 .gdbstub = 1, #endif }; #endif /* CONFIG_MN10300_TTYSM1 */ /* * the third on-chip serial port: ttySM2 (aka SIF2) */ #ifdef CONFIG_MN10300_TTYSM2 struct mn10300_serial_port mn10300_serial_port_sif2 = { .uart.ops = &mn10300_serial_ops, .uart.membase = (void __iomem *) &SC2CTR, .uart.mapbase = (unsigned long) &SC2CTR, .uart.iotype = UPIO_MEM, .uart.irq = 0, .uart.uartclk = 0, /* MN10300_IOCLK, */ .uart.fifosize = 1, .uart.flags = UPF_BOOT_AUTOCONF, .uart.line = 2, #ifdef CONFIG_MN10300_TTYSM2_CTS .uart.type = PORT_MN10300_CTS, #else .uart.type = PORT_MN10300, #endif .uart.lock = __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock), .name = "ttySM2", .rx_name = "ttySM2/Rx", .tx_name = "ttySM2/Tx", .tm_name = "ttySM2/Timer10", ._iobase = &SC2CTR, ._control = &SC2CTR, ._status = &SC2STR, ._intr = &SC2ICR, ._rxb = &SC2RXB, ._txb = &SC2TXB, ._tmxmd = &TM10MD, ._tmxbr = &TM10BR, ._tmicr = &TM10ICR, .tm_irq = TM10IRQ, .div_timer = MNSCx_DIV_TIMER_16BIT, .rx_irq = SC2RXIRQ, .tx_irq = SC2TXIRQ, .rx_icr = &GxICR(SC2RXIRQ), .tx_icr = &GxICR(SC2TXIRQ), .clock_src = MNSCx_CLOCK_SRC_IOCLK, #ifdef CONFIG_MN10300_TTYSM2_CTS .options = MNSCx_OPT_CTS, #else .options = 0, #endif #ifdef CONFIG_GDBSTUB_ON_TTYSM2 .gdbstub = 1, #endif }; #endif /* CONFIG_MN10300_TTYSM2 */ /* * list of available serial ports */ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = { #ifdef CONFIG_MN10300_TTYSM0 [0] = &mn10300_serial_port_sif0, #endif #ifdef CONFIG_MN10300_TTYSM1 [1] = &mn10300_serial_port_sif1, #endif #ifdef CONFIG_MN10300_TTYSM2 [2] = &mn10300_serial_port_sif2, #endif [NR_UARTS] = NULL, }; /* * we abuse the serial ports' baud timers' interrupt lines to get the ability * to deliver interrupts to userspace as we use the ports' interrupt lines to * do virtual DMA on account 
of the ports having no hardware FIFOs * * we can generate an interrupt manually in the assembly stubs by writing to * the enable and detect bits in the interrupt control register, so all we need * to do here is disable the interrupt line * * note that we can't just leave the line enabled as the baud rate timer *also* * generates interrupts */ static void mn10300_serial_mask_ack(unsigned int irq) { u16 tmp; GxICR(irq) = GxICR_LEVEL_6; tmp = GxICR(irq); /* flush write buffer */ } static void mn10300_serial_nop(unsigned int irq) { } static struct irq_chip mn10300_serial_pic = { .name = "mnserial", .ack = mn10300_serial_mask_ack, .mask = mn10300_serial_mask_ack, .mask_ack = mn10300_serial_mask_ack, .unmask = mn10300_serial_nop, .end = mn10300_serial_nop, }; /* * serial virtual DMA interrupt jump table */ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS]; static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port) { u16 x; *port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT; x = *port->tx_icr; } static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port) { u16 x; *port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE; x = *port->tx_icr; } static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port) { u16 x; *port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT; x = *port->rx_icr; } /* * multi-bit equivalent of test_and_clear_bit() */ static int mask_test_and_clear(volatile u8 *ptr, u8 mask) { u32 epsw; asm volatile(" bclr %1,(%2) \n" " mov epsw,%0 \n" : "=d"(epsw) : "d"(mask), "a"(ptr)); return !(epsw & EPSW_FLAG_Z); } /* * receive chars from the ring buffer for this serial port * - must do break detection here (not done in the UART) */ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port) { struct uart_icount *icount = &port->uart.icount; struct tty_struct *tty = port->uart.state->port.tty; unsigned ix; int count; u8 st, ch, push, status, overrun; _enter("%s", port->name); push = 0; count = CIRC_CNT(port->rx_inp, port->rx_outp, MNSC_BUFFER_SIZE); count = tty_buffer_request_room(tty, count); if (count == 0) { if (!tty->low_latency) tty_flip_buffer_push(tty); return; } try_again: /* pull chars out of the hat */ ix = port->rx_outp; if (ix == port->rx_inp) { if (push && !tty->low_latency) tty_flip_buffer_push(tty); return; } ch = port->rx_buffer[ix++]; st = port->rx_buffer[ix++]; smp_rmb(); port->rx_outp = ix & (MNSC_BUFFER_SIZE - 1); port->uart.icount.rx++; st &= SC01STR_FEF | SC01STR_PEF | SC01STR_OEF; status = 0; overrun = 0; /* the UART doesn't detect BREAK, so we have to do that ourselves * - it starts as a framing error on a NUL character * - then we count another two NUL characters before issuing TTY_BREAK * - then we end on a normal char or one that has all the bottom bits * zero and the top bits set */ switch (port->rx_brk) { case 0: /* not breaking at the moment */ break; case 1: if (st & SC01STR_FEF && ch == 0) { port->rx_brk = 2; goto try_again; } goto not_break; case 2: if (st & SC01STR_FEF && ch == 0) { port->rx_brk = 3; _proto("Rx Break Detected"); icount->brk++; if (uart_handle_break(&port->uart)) goto ignore_char; status |= 1 << TTY_BREAK; goto insert; } goto not_break; default: if (st & (SC01STR_FEF | SC01STR_PEF | SC01STR_OEF)) goto try_again; /* still breaking */ port->rx_brk = 0; /* end of the break */ switch (ch) { case 0xFF: case 0xFE: case 0xFC: case 0xF8: case 0xF0: case 0xE0: case 0xC0: case 0x80: case 0x00: /* discard char at probable break end */ goto try_again; } break; } process_errors: /* handle framing error */ if (st & 
SC01STR_FEF) { if (ch == 0) { /* framing error with NUL char is probably a BREAK */ port->rx_brk = 1; goto try_again; } _proto("Rx Framing Error"); icount->frame++; status |= 1 << TTY_FRAME; } /* handle parity error */ if (st & SC01STR_PEF) { _proto("Rx Parity Error"); icount->parity++; status = TTY_PARITY; } /* handle normal char */ if (status == 0) { if (uart_handle_sysrq_char(&port->uart, ch)) goto ignore_char; status = (1 << TTY_NORMAL); } /* handle overrun error */ if (st & SC01STR_OEF) { if (port->rx_brk) goto try_again; _proto("Rx Overrun Error"); icount->overrun++; overrun = 1; } insert: status &= port->uart.read_status_mask; if (!overrun && !(status & port->uart.ignore_status_mask)) { int flag; if (status & (1 << TTY_BREAK)) flag = TTY_BREAK; else if (status & (1 << TTY_PARITY)) flag = TTY_PARITY; else if (status & (1 << TTY_FRAME)) flag = TTY_FRAME; else flag = TTY_NORMAL; tty_insert_flip_char(tty, ch, flag); } /* overrun is special, since it's reported immediately, and doesn't * affect the current character */ if (overrun) tty_insert_flip_char(tty, 0, TTY_OVERRUN); count--; if (count <= 0) { if (!tty->low_latency) tty_flip_buffer_push(tty); return; } ignore_char: push = 1; goto try_again; not_break: port->rx_brk = 0; goto process_errors; } /* * handle an interrupt from the serial transmission "virtual DMA" driver * - note: the interrupt routine will disable its own interrupts when the Tx * buffer is empty */ static void mn10300_serial_transmit_interrupt(struct mn10300_serial_port *port) { _enter("%s", port->name); if (!port->uart.state || !port->uart.state->port.tty) { mn10300_serial_dis_tx_intr(port); return; } if (uart_tx_stopped(&port->uart) || uart_circ_empty(&port->uart.state->xmit)) mn10300_serial_dis_tx_intr(port); if (uart_circ_chars_pending(&port->uart.state->xmit) < WAKEUP_CHARS) uart_write_wakeup(&port->uart); } /* * deal with a change in the status of the CTS line */ static void mn10300_serial_cts_changed(struct mn10300_serial_port *port, u8 st) { u16 ctr; port->tx_cts = st; port->uart.icount.cts++; /* flip the CTS state selector flag to interrupt when it changes * back */ ctr = *port->_control; ctr ^= SC2CTR_TWS; *port->_control = ctr; uart_handle_cts_change(&port->uart, st & SC2STR_CTS); wake_up_interruptible(&port->uart.state->port.delta_msr_wait); } /* * handle a virtual interrupt generated by the lower level "virtual DMA" * routines (irq is the baud timer interrupt) */ static irqreturn_t mn10300_serial_interrupt(int irq, void *dev_id) { struct mn10300_serial_port *port = dev_id; u8 st; spin_lock(&port->uart.lock); if (port->intr_flags) { _debug("INT %s: %x", port->name, port->intr_flags); if (mask_test_and_clear(&port->intr_flags, MNSCx_RX_AVAIL)) mn10300_serial_receive_interrupt(port); if (mask_test_and_clear(&port->intr_flags, MNSCx_TX_SPACE | MNSCx_TX_EMPTY)) mn10300_serial_transmit_interrupt(port); } /* the only modem control line amongst the whole lot is CTS on * serial port 2 */ if (port->type == PORT_MN10300_CTS) { st = *port->_status; if ((port->tx_cts ^ st) & SC2STR_CTS) mn10300_serial_cts_changed(port, st); } spin_unlock(&port->uart.lock); return IRQ_HANDLED; } /* * return indication of whether the hardware transmit buffer is empty */ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s", port->name); return (*port->_status & (SC01STR_TXF | SC01STR_TBF)) ? 
0 : TIOCSER_TEMT; } /* * set the modem control lines (we don't have any) */ static void mn10300_serial_set_mctrl(struct uart_port *_port, unsigned int mctrl) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s,%x", port->name, mctrl); } /* * get the modem control line statuses */ static unsigned int mn10300_serial_get_mctrl(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s", port->name); if (port->type == PORT_MN10300_CTS && !(*port->_status & SC2STR_CTS)) return TIOCM_CAR | TIOCM_DSR; return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR; } /* * stop transmitting characters */ static void mn10300_serial_stop_tx(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s", port->name); /* disable the virtual DMA */ mn10300_serial_dis_tx_intr(port); } /* * start transmitting characters * - jump-start transmission if it has stalled * - enable the serial Tx interrupt (used by the virtual DMA controller) * - force an interrupt to happen if necessary */ static void mn10300_serial_start_tx(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); u16 x; _enter("%s{%lu}", port->name, CIRC_CNT(&port->uart.state->xmit.head, &port->uart.state->xmit.tail, UART_XMIT_SIZE)); /* kick the virtual DMA controller */ x = *port->tx_icr; x |= GxICR_ENABLE; if (*port->_status & SC01STR_TBF) x &= ~(GxICR_REQUEST | GxICR_DETECT); else x |= GxICR_REQUEST | GxICR_DETECT; _debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx", *port->_control, *port->_intr, *port->_status, *port->_tmxmd, *port->_tmxbr, *port->tx_icr); *port->tx_icr = x; x = *port->tx_icr; } /* * transmit a high-priority XON/XOFF character */ static void mn10300_serial_send_xchar(struct uart_port *_port, char ch) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s,%02x", port->name, ch); if (likely(port->gdbstub)) { port->tx_xchar = ch; if (ch) mn10300_serial_en_tx_intr(port); } } /* * stop receiving characters * - called whilst the port is being closed */ static void mn10300_serial_stop_rx(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); u16 ctr; _enter("%s", port->name); ctr = *port->_control; ctr &= ~SC01CTR_RXE; *port->_control = ctr; mn10300_serial_dis_rx_intr(port); } /* * enable modem status interrupts */ static void mn10300_serial_enable_ms(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); u16 ctr, cts; _enter("%s", port->name); if (port->type == PORT_MN10300_CTS) { /* want to interrupt when CTS goes low if CTS is now high and * vice versa */ port->tx_cts = *port->_status; cts = (port->tx_cts & SC2STR_CTS) ? 
SC2CTR_TWE : SC2CTR_TWE | SC2CTR_TWS; ctr = *port->_control; ctr &= ~SC2CTR_TWS; ctr |= cts; *port->_control = ctr; mn10300_serial_en_tx_intr(port); } } /* * transmit or cease transmitting a break signal */ static void mn10300_serial_break_ctl(struct uart_port *_port, int ctl) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s,%d", port->name, ctl); if (ctl) { /* tell the virtual DMA handler to assert BREAK */ port->tx_break = 1; mn10300_serial_en_tx_intr(port); } else { port->tx_break = 0; *port->_control &= ~SC01CTR_BKE; mn10300_serial_en_tx_intr(port); } } /* * grab the interrupts and enable the port for reception */ static int mn10300_serial_startup(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); struct mn10300_serial_int *pint; _enter("%s{%d}", port->name, port->gdbstub); if (unlikely(port->gdbstub)) return -EBUSY; /* allocate an Rx buffer for the virtual DMA handler */ port->rx_buffer = kmalloc(MNSC_BUFFER_SIZE, GFP_KERNEL); if (!port->rx_buffer) return -ENOMEM; port->rx_inp = port->rx_outp = 0; /* finally, enable the device */ *port->_intr = SC01ICR_TI; *port->_control |= SC01CTR_TXE | SC01CTR_RXE; pint = &mn10300_serial_int_tbl[port->rx_irq]; pint->port = port; pint->vdma = mn10300_serial_vdma_rx_handler; pint = &mn10300_serial_int_tbl[port->tx_irq]; pint->port = port; pint->vdma = mn10300_serial_vdma_tx_handler; set_intr_level(port->rx_irq, GxICR_LEVEL_1); set_intr_level(port->tx_irq, GxICR_LEVEL_1); set_irq_chip(port->tm_irq, &mn10300_serial_pic); if (request_irq(port->rx_irq, mn10300_serial_interrupt, IRQF_DISABLED, port->rx_name, port) < 0) goto error; if (request_irq(port->tx_irq, mn10300_serial_interrupt, IRQF_DISABLED, port->tx_name, port) < 0) goto error2; if (request_irq(port->tm_irq, mn10300_serial_interrupt, IRQF_DISABLED, port->tm_name, port) < 0) goto error3; mn10300_serial_mask_ack(port->tm_irq); return 0; error3: free_irq(port->tx_irq, port); error2: free_irq(port->rx_irq, port); error: kfree(port->rx_buffer); port->rx_buffer = NULL; return -EBUSY; } /* * shutdown the port and release interrupts */ static void mn10300_serial_shutdown(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s", port->name); /* disable the serial port and its baud rate timer */ port->tx_break = 0; *port->_control &= ~(SC01CTR_TXE | SC01CTR_RXE | SC01CTR_BKE); *port->_tmxmd = 0; if (port->rx_buffer) { void *buf = port->rx_buffer; port->rx_buffer = NULL; kfree(buf); } /* disable all intrs */ free_irq(port->tm_irq, port); free_irq(port->rx_irq, port); free_irq(port->tx_irq, port); *port->rx_icr = GxICR_LEVEL_1; *port->tx_icr = GxICR_LEVEL_1; } /* * this routine is called to set the UART divisor registers to match the * specified baud rate for a serial port. 
*/ static void mn10300_serial_change_speed(struct mn10300_serial_port *port, struct ktermios *new, struct ktermios *old) { unsigned long flags; unsigned long ioclk = port->ioclk; unsigned cflag; int baud, bits, xdiv, tmp; u16 tmxbr, scxctr; u8 tmxmd, battempt; u8 div_timer = port->div_timer; _enter("%s{%lu}", port->name, ioclk); /* byte size and parity */ cflag = new->c_cflag; switch (cflag & CSIZE) { case CS7: scxctr = SC01CTR_CLN_7BIT; bits = 9; break; case CS8: scxctr = SC01CTR_CLN_8BIT; bits = 10; break; default: scxctr = SC01CTR_CLN_8BIT; bits = 10; break; } if (cflag & CSTOPB) { scxctr |= SC01CTR_STB_2BIT; bits++; } if (cflag & PARENB) { bits++; if (cflag & PARODD) scxctr |= SC01CTR_PB_ODD; #ifdef CMSPAR else if (cflag & CMSPAR) scxctr |= SC01CTR_PB_FIXED0; #endif else scxctr |= SC01CTR_PB_EVEN; } /* Determine divisor based on baud rate */ battempt = 0; if (div_timer == MNSCx_DIV_TIMER_16BIT) scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8 * == SC2CTR_CK_TM10UFLOW) */ else if (div_timer == MNSCx_DIV_TIMER_8BIT) scxctr |= SC0CTR_CK_TM2UFLOW_8; try_alternative: baud = uart_get_baud_rate(&port->uart, new, old, 0, port->ioclk / 8); _debug("ALT %d [baud %d]", battempt, baud); if (!baud) baud = 9600; /* B0 transition handled in rs_set_termios */ xdiv = 1; if (baud == 134) { baud = 269; /* 134 is really 134.5 */ xdiv = 2; } if (baud == 38400 && (port->uart.flags & UPF_SPD_MASK) == UPF_SPD_CUST ) { _debug("CUSTOM %u", port->uart.custom_divisor); if (div_timer == MNSCx_DIV_TIMER_16BIT) { if (port->uart.custom_divisor <= 65535) { tmxmd = TM8MD_SRC_IOCLK; tmxbr = port->uart.custom_divisor; port->uart.uartclk = ioclk; goto timer_okay; } if (port->uart.custom_divisor / 8 <= 65535) { tmxmd = TM8MD_SRC_IOCLK_8; tmxbr = port->uart.custom_divisor / 8; port->uart.custom_divisor = tmxbr * 8; port->uart.uartclk = ioclk / 8; goto timer_okay; } if (port->uart.custom_divisor / 32 <= 65535) { tmxmd = TM8MD_SRC_IOCLK_32; tmxbr = port->uart.custom_divisor / 32; port->uart.custom_divisor = tmxbr * 32; port->uart.uartclk = ioclk / 32; goto timer_okay; } } else if (div_timer == MNSCx_DIV_TIMER_8BIT) { if (port->uart.custom_divisor <= 255) { tmxmd = TM2MD_SRC_IOCLK; tmxbr = port->uart.custom_divisor; port->uart.uartclk = ioclk; goto timer_okay; } if (port->uart.custom_divisor / 8 <= 255) { tmxmd = TM2MD_SRC_IOCLK_8; tmxbr = port->uart.custom_divisor / 8; port->uart.custom_divisor = tmxbr * 8; port->uart.uartclk = ioclk / 8; goto timer_okay; } if (port->uart.custom_divisor / 32 <= 255) { tmxmd = TM2MD_SRC_IOCLK_32; tmxbr = port->uart.custom_divisor / 32; port->uart.custom_divisor = tmxbr * 32; port->uart.uartclk = ioclk / 32; goto timer_okay; } } } switch (div_timer) { case MNSCx_DIV_TIMER_16BIT: port->uart.uartclk = ioclk; tmxmd = TM8MD_SRC_IOCLK; tmxbr = tmp = (ioclk / (baud * xdiv) + 4) / 8 - 1; if (tmp > 0 && tmp <= 65535) goto timer_okay; port->uart.uartclk = ioclk / 8; tmxmd = TM8MD_SRC_IOCLK_8; tmxbr = tmp = (ioclk / (baud * 8 * xdiv) + 4) / 8 - 1; if (tmp > 0 && tmp <= 65535) goto timer_okay; port->uart.uartclk = ioclk / 32; tmxmd = TM8MD_SRC_IOCLK_32; tmxbr = tmp = (ioclk / (baud * 32 * xdiv) + 4) / 8 - 1; if (tmp > 0 && tmp <= 65535) goto timer_okay; break; case MNSCx_DIV_TIMER_8BIT: port->uart.uartclk = ioclk; tmxmd = TM2MD_SRC_IOCLK; tmxbr = tmp = (ioclk / (baud * xdiv) + 4) / 8 - 1; if (tmp > 0 && tmp <= 255) goto timer_okay; port->uart.uartclk = ioclk / 8; tmxmd = TM2MD_SRC_IOCLK_8; tmxbr = tmp = (ioclk / (baud * 8 * xdiv) + 4) / 8 - 1; if (tmp > 0 && tmp <= 255) goto timer_okay; 
port->uart.uartclk = ioclk / 32; tmxmd = TM2MD_SRC_IOCLK_32; tmxbr = tmp = (ioclk / (baud * 32 * xdiv) + 4) / 8 - 1; if (tmp > 0 && tmp <= 255) goto timer_okay; break; default: BUG(); return; } /* refuse to change to a baud rate we can't support */ _debug("CAN'T SUPPORT"); switch (battempt) { case 0: if (old) { new->c_cflag &= ~CBAUD; new->c_cflag |= (old->c_cflag & CBAUD); battempt = 1; goto try_alternative; } case 1: /* as a last resort, if the quotient is zero, default to 9600 * bps */ new->c_cflag &= ~CBAUD; new->c_cflag |= B9600; battempt = 2; goto try_alternative; default: /* hmmm... can't seem to support 9600 either * - we could try iterating through the speeds we know about to * find the lowest */ new->c_cflag &= ~CBAUD; new->c_cflag |= B0; if (div_timer == MNSCx_DIV_TIMER_16BIT) tmxmd = TM8MD_SRC_IOCLK_32; else if (div_timer == MNSCx_DIV_TIMER_8BIT) tmxmd = TM2MD_SRC_IOCLK_32; tmxbr = 1; port->uart.uartclk = ioclk / 32; break; } timer_okay: _debug("UARTCLK: %u / %hu", port->uart.uartclk, tmxbr); /* make the changes */ spin_lock_irqsave(&port->uart.lock, flags); uart_update_timeout(&port->uart, new->c_cflag, baud); /* set the timer to produce the required baud rate */ switch (div_timer) { case MNSCx_DIV_TIMER_16BIT: *port->_tmxmd = 0; *port->_tmxbr = tmxbr; *port->_tmxmd = TM8MD_INIT_COUNTER; *port->_tmxmd = tmxmd | TM8MD_COUNT_ENABLE; break; case MNSCx_DIV_TIMER_8BIT: *port->_tmxmd = 0; *(volatile u8 *) port->_tmxbr = (u8) tmxbr; *port->_tmxmd = TM2MD_INIT_COUNTER; *port->_tmxmd = tmxmd | TM2MD_COUNT_ENABLE; break; } /* CTS flow control flag and modem status interrupts */ scxctr &= ~(SC2CTR_TWE | SC2CTR_TWS); if (port->type == PORT_MN10300_CTS && cflag & CRTSCTS) { /* want to interrupt when CTS goes low if CTS is now * high and vice versa */ port->tx_cts = *port->_status; if (port->tx_cts & SC2STR_CTS) scxctr |= SC2CTR_TWE; else scxctr |= SC2CTR_TWE | SC2CTR_TWS; } /* set up parity check flag */ port->uart.read_status_mask = (1 << TTY_NORMAL) | (1 << TTY_OVERRUN); if (new->c_iflag & INPCK) port->uart.read_status_mask |= (1 << TTY_PARITY) | (1 << TTY_FRAME); if (new->c_iflag & (BRKINT | PARMRK)) port->uart.read_status_mask |= (1 << TTY_BREAK); /* characters to ignore */ port->uart.ignore_status_mask = 0; if (new->c_iflag & IGNPAR) port->uart.ignore_status_mask |= (1 << TTY_PARITY) | (1 << TTY_FRAME); if (new->c_iflag & IGNBRK) { port->uart.ignore_status_mask |= (1 << TTY_BREAK); /* * If we're ignoring parity and break indicators, * ignore overruns to (for real raw support). 
*/ if (new->c_iflag & IGNPAR) port->uart.ignore_status_mask |= (1 << TTY_OVERRUN); } /* Ignore all characters if CREAD is not set */ if ((new->c_cflag & CREAD) == 0) port->uart.ignore_status_mask |= (1 << TTY_NORMAL); scxctr |= *port->_control & (SC01CTR_TXE | SC01CTR_RXE | SC01CTR_BKE); *port->_control = scxctr; spin_unlock_irqrestore(&port->uart.lock, flags); } /* * set the terminal I/O parameters */ static void mn10300_serial_set_termios(struct uart_port *_port, struct ktermios *new, struct ktermios *old) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s,%p,%p", port->name, new, old); mn10300_serial_change_speed(port, new, old); /* handle turning off CRTSCTS */ if (!(new->c_cflag & CRTSCTS)) { u16 ctr = *port->_control; ctr &= ~SC2CTR_TWE; *port->_control = ctr; } } /* * return description of port type */ static const char *mn10300_serial_type(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); if (port->uart.type == PORT_MN10300_CTS) return "MN10300 SIF_CTS"; return "MN10300 SIF"; } /* * release I/O and memory regions in use by port */ static void mn10300_serial_release_port(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s", port->name); release_mem_region((unsigned long) port->_iobase, 16); } /* * request I/O and memory regions for port */ static int mn10300_serial_request_port(struct uart_port *_port) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s", port->name); request_mem_region((unsigned long) port->_iobase, 16, port->name); return 0; } /* * configure the type and reserve the ports */ static void mn10300_serial_config_port(struct uart_port *_port, int type) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s", port->name); port->uart.type = PORT_MN10300; if (port->options & MNSCx_OPT_CTS) port->uart.type = PORT_MN10300_CTS; mn10300_serial_request_port(_port); } /* * verify serial parameters are suitable for this port type */ static int mn10300_serial_verify_port(struct uart_port *_port, struct serial_struct *ss) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); void *mapbase = (void *) (unsigned long) port->uart.mapbase; _enter("%s", port->name); /* these things may not be changed */ if (ss->irq != port->uart.irq || ss->port != port->uart.iobase || ss->io_type != port->uart.iotype || ss->iomem_base != mapbase || ss->iomem_reg_shift != port->uart.regshift || ss->hub6 != port->uart.hub6 || ss->xmit_fifo_size != port->uart.fifosize) return -EINVAL; /* type may be changed on a port that supports CTS */ if (ss->type != port->uart.type) { if (!(port->options & MNSCx_OPT_CTS)) return -EINVAL; if (ss->type != PORT_MN10300 && ss->type != PORT_MN10300_CTS) return -EINVAL; } return 0; } /* * initialise the MN10300 on-chip UARTs */ static int __init mn10300_serial_init(void) { struct mn10300_serial_port *port; int ret, i; printk(KERN_INFO "%s version %s (%s)\n", serial_name, serial_version, serial_revdate); #ifdef CONFIG_MN10300_TTYSM2 SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */ #endif set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt); ret = uart_register_driver(&mn10300_serial_driver); if (!ret) { for (i = 0 ; i < NR_PORTS ; i++) { port = mn10300_serial_ports[i]; if (!port || port->gdbstub) continue; switch (port->clock_src) { 
case MNSCx_CLOCK_SRC_IOCLK: port->ioclk = MN10300_IOCLK; break; #ifdef MN10300_IOBCLK case MNSCx_CLOCK_SRC_IOBCLK: port->ioclk = MN10300_IOBCLK; break; #endif default: BUG(); } ret = uart_add_one_port(&mn10300_serial_driver, &port->uart); if (ret < 0) { _debug("ERROR %d", -ret); break; } } if (ret) uart_unregister_driver(&mn10300_serial_driver); } return ret; } __initcall(mn10300_serial_init); #ifdef CONFIG_MN10300_TTYSM_CONSOLE /* * print a string to the serial port without disturbing the real user of the * port too much * - the console must be locked by the caller */ static void mn10300_serial_console_write(struct console *co, const char *s, unsigned count) { struct mn10300_serial_port *port; unsigned i; u16 scxctr, txicr, tmp; u8 tmxmd; port = mn10300_serial_ports[co->index]; /* firstly hijack the serial port from the "virtual DMA" controller */ txicr = *port->tx_icr; *port->tx_icr = GxICR_LEVEL_1; tmp = *port->tx_icr; /* the transmitter may be disabled */ scxctr = *port->_control; if (!(scxctr & SC01CTR_TXE)) { /* restart the UART clock */ tmxmd = *port->_tmxmd; switch (port->div_timer) { case MNSCx_DIV_TIMER_16BIT: *port->_tmxmd = 0; *port->_tmxmd = TM8MD_INIT_COUNTER; *port->_tmxmd = tmxmd | TM8MD_COUNT_ENABLE; break; case MNSCx_DIV_TIMER_8BIT: *port->_tmxmd = 0; *port->_tmxmd = TM2MD_INIT_COUNTER; *port->_tmxmd = tmxmd | TM2MD_COUNT_ENABLE; break; } /* enable the transmitter */ *port->_control = (scxctr & ~SC01CTR_BKE) | SC01CTR_TXE; } else if (scxctr & SC01CTR_BKE) { /* stop transmitting BREAK */ *port->_control = (scxctr & ~SC01CTR_BKE); } /* send the chars into the serial port (with LF -> LFCR conversion) */ for (i = 0; i < count; i++) { char ch = *s++; while (*port->_status & SC01STR_TBF) continue; *(u8 *) port->_txb = ch; if (ch == 0x0a) { while (*port->_status & SC01STR_TBF) continue; *(u8 *) port->_txb = 0xd; } } /* can't let the transmitter be turned off if it's actually * transmitting */ while (*port->_status & (SC01STR_TXF | SC01STR_TBF)) continue; /* disable the transmitter if we re-enabled it */ if (!(scxctr & SC01CTR_TXE)) *port->_control = scxctr; *port->tx_icr = txicr; tmp = *port->tx_icr; } /* * set up a serial port as a console * - construct a cflag setting for the first rs_open() * - initialize the serial port * - return non-zero if we didn't find a serial port. */ static int __init mn10300_serial_console_setup(struct console *co, char *options) { struct mn10300_serial_port *port; int i, parity = 'n', baud = 9600, bits = 8, flow = 0; for (i = 0 ; i < NR_PORTS ; i++) { port = mn10300_serial_ports[i]; if (port && !port->gdbstub && port->uart.line == co->index) goto found_device; } return -ENODEV; found_device: switch (port->clock_src) { case MNSCx_CLOCK_SRC_IOCLK: port->ioclk = MN10300_IOCLK; break; #ifdef MN10300_IOBCLK case MNSCx_CLOCK_SRC_IOBCLK: port->ioclk = MN10300_IOBCLK; break; #endif default: BUG(); } if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); return uart_set_options(&port->uart, co, baud, parity, bits, flow); } /* * register console */ static int __init mn10300_serial_console_init(void) { register_console(&mn10300_serial_console); return 0; } console_initcall(mn10300_serial_console_init); #endif
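/*
 * Editor's note: a minimal user-space sketch of the 16-bit divide-timer
 * reload calculation used by mn10300_serial_change_speed() above.  The
 * 50 MHz IOCLK is a hypothetical stand-in (the real MN10300_IOCLK is
 * board-specific); illustration only, not part of the driver.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ioclk = 50000000;		/* hypothetical IOCLK */
	int baud = 115200, xdiv = 1;
	int tmxbr;

	/* same formula as the MNSCx_DIV_TIMER_16BIT case: "+ 4" rounds the
	 * divide-by-8 to the nearest integer before deriving the timer
	 * reload value; the UART then clocks at timer-underflow / 8 */
	tmxbr = (ioclk / (baud * xdiv) + 4) / 8 - 1;

	printf("TM8BR reload = %d, actual baud = %lu\n",
	       tmxbr, ioclk / ((unsigned long)(tmxbr + 1) * 8));
	return 0;
}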
gpl-2.0
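The next file in this dump, drivers/pinctrl/sh-pfc/pfc-r8a73a4.c, describes the r8a73a4 pin multiplexer almost entirely as data: each selectable pin function gets a "mark", a pin group pairs an array of pin numbers with a parallel array of marks, and a named function simply lists the groups it may claim. Below is a minimal, self-contained sketch of that table-driven idea; the struct names, the example mark values, and the find_group() helper are invented for illustration and are not the sh-pfc API.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* A group ties pin numbers to the mux selection ("mark") for each pin. */
struct pin_group {
	const char *name;
	const unsigned int *pins;	/* physical pin numbers */
	const unsigned int *mux;	/* mux mark per pin, same length */
	size_t npins;
};

/* A function is just a named list of groups it can be routed through. */
struct pin_function {
	const char *name;
	const char * const *groups;
	size_t ngroups;
};

/* Example data, mirroring the shape of mmc0_data4_pins[]/mmc0_data4_mux[];
 * the mark values 1001..1004 are placeholders, not real r8a73a4 marks. */
static const unsigned int mmc0_data4_pins[] = { 164, 165, 166, 167 };
static const unsigned int mmc0_data4_mux[]  = { 1001, 1002, 1003, 1004 };

static const struct pin_group groups[] = {
	{ "mmc0_data4", mmc0_data4_pins, mmc0_data4_mux, 4 },
};

static const char * const mmc0_groups[] = { "mmc0_data4" };

static const struct pin_function functions[] = {
	{ "mmc0", mmc0_groups, 1 },
};

/* Resolve a group by name, the way a pinctrl core would before muxing it. */
static const struct pin_group *find_group(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
		if (strcmp(groups[i].name, name) == 0)
			return &groups[i];
	return NULL;
}

int main(void)
{
	const struct pin_group *g = find_group(functions[0].groups[0]);
	size_t i;

	if (g)
		for (i = 0; i < g->npins; i++)
			printf("pin %u -> mark %u\n", g->pins[i], g->mux[i]);
	return 0;
}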
cavium-thunderx-open-source/linux
drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
763
84504
/* * Copyright (C) 2012-2013 Renesas Solutions Corp. * Copyright (C) 2013 Magnus Damm * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/io.h> #include <linux/kernel.h> #include <linux/pinctrl/pinconf-generic.h> #ifndef CONFIG_ARCH_MULTIPLATFORM #include <mach/irqs.h> #endif #include "core.h" #include "sh_pfc.h" #define CPU_ALL_PORT(fn, pfx, sfx) \ /* Port0 - Port30 */ \ PORT_10(0, fn, pfx, sfx), \ PORT_10(10, fn, pfx##1, sfx), \ PORT_10(20, fn, pfx##2, sfx), \ PORT_1(30, fn, pfx##30, sfx), \ /* Port32 - Port40 */ \ PORT_1(32, fn, pfx##32, sfx), PORT_1(33, fn, pfx##33, sfx), \ PORT_1(34, fn, pfx##34, sfx), PORT_1(35, fn, pfx##35, sfx), \ PORT_1(36, fn, pfx##36, sfx), PORT_1(37, fn, pfx##37, sfx), \ PORT_1(38, fn, pfx##38, sfx), PORT_1(39, fn, pfx##39, sfx), \ PORT_1(40, fn, pfx##40, sfx), \ /* Port64 - Port85 */ \ PORT_1(64, fn, pfx##64, sfx), PORT_1(65, fn, pfx##65, sfx), \ PORT_1(66, fn, pfx##66, sfx), PORT_1(67, fn, pfx##67, sfx), \ PORT_1(68, fn, pfx##68, sfx), PORT_1(69, fn, pfx##69, sfx), \ PORT_10(70, fn, pfx##7, sfx), \ PORT_1(80, fn, pfx##80, sfx), PORT_1(81, fn, pfx##81, sfx), \ PORT_1(82, fn, pfx##82, sfx), PORT_1(83, fn, pfx##83, sfx), \ PORT_1(84, fn, pfx##84, sfx), PORT_1(85, fn, pfx##85, sfx), \ /* Port96 - Port126 */ \ PORT_1(96, fn, pfx##96, sfx), PORT_1(97, fn, pfx##97, sfx), \ PORT_1(98, fn, pfx##98, sfx), PORT_1(99, fn, pfx##99, sfx), \ PORT_10(100, fn, pfx##10, sfx), \ PORT_10(110, fn, pfx##11, sfx), \ PORT_1(120, fn, pfx##120, sfx), PORT_1(121, fn, pfx##121, sfx), \ PORT_1(122, fn, pfx##122, sfx), PORT_1(123, fn, pfx##123, sfx), \ PORT_1(124, fn, pfx##124, sfx), PORT_1(125, fn, pfx##125, sfx), \ PORT_1(126, fn, pfx##126, sfx), \ /* Port128 - Port134 */ \ PORT_1(128, fn, pfx##128, sfx), PORT_1(129, fn, pfx##129, sfx), \ PORT_1(130, fn, pfx##130, sfx), PORT_1(131, fn, pfx##131, sfx), \ PORT_1(132, fn, pfx##132, sfx), PORT_1(133, fn, pfx##133, sfx), \ PORT_1(134, fn, pfx##134, sfx), \ /* Port160 - Port178 */ \ PORT_10(160, fn, pfx##16, sfx), \ PORT_1(170, fn, pfx##170, sfx), PORT_1(171, fn, pfx##171, sfx), \ PORT_1(172, fn, pfx##172, sfx), PORT_1(173, fn, pfx##173, sfx), \ PORT_1(174, fn, pfx##174, sfx), PORT_1(175, fn, pfx##175, sfx), \ PORT_1(176, fn, pfx##176, sfx), PORT_1(177, fn, pfx##177, sfx), \ PORT_1(178, fn, pfx##178, sfx), \ /* Port192 - Port222 */ \ PORT_1(192, fn, pfx##192, sfx), PORT_1(193, fn, pfx##193, sfx), \ PORT_1(194, fn, pfx##194, sfx), PORT_1(195, fn, pfx##195, sfx), \ PORT_1(196, fn, pfx##196, sfx), PORT_1(197, fn, pfx##197, sfx), \ PORT_1(198, fn, pfx##198, sfx), PORT_1(199, fn, pfx##199, sfx), \ PORT_10(200, fn, pfx##20, sfx), \ PORT_10(210, fn, pfx##21, sfx), \ PORT_1(220, fn, pfx##220, sfx), PORT_1(221, fn, pfx##221, sfx), \ PORT_1(222, fn, pfx##222, sfx), \ /* Port224 - Port250 */ \ PORT_1(224, fn, pfx##224, sfx), PORT_1(225, fn, pfx##225, sfx), \ PORT_1(226, 
fn, pfx##226, sfx), PORT_1(227, fn, pfx##227, sfx), \ PORT_1(228, fn, pfx##228, sfx), PORT_1(229, fn, pfx##229, sfx), \ PORT_10(230, fn, pfx##23, sfx), \ PORT_10(240, fn, pfx##24, sfx), \ PORT_1(250, fn, pfx##250, sfx), \ /* Port256 - Port283 */ \ PORT_1(256, fn, pfx##256, sfx), PORT_1(257, fn, pfx##257, sfx), \ PORT_1(258, fn, pfx##258, sfx), PORT_1(259, fn, pfx##259, sfx), \ PORT_10(260, fn, pfx##26, sfx), \ PORT_10(270, fn, pfx##27, sfx), \ PORT_1(280, fn, pfx##280, sfx), PORT_1(281, fn, pfx##281, sfx), \ PORT_1(282, fn, pfx##282, sfx), PORT_1(283, fn, pfx##283, sfx), \ /* Port288 - Port308 */ \ PORT_1(288, fn, pfx##288, sfx), PORT_1(289, fn, pfx##289, sfx), \ PORT_10(290, fn, pfx##29, sfx), \ PORT_1(300, fn, pfx##300, sfx), PORT_1(301, fn, pfx##301, sfx), \ PORT_1(302, fn, pfx##302, sfx), PORT_1(303, fn, pfx##303, sfx), \ PORT_1(304, fn, pfx##304, sfx), PORT_1(305, fn, pfx##305, sfx), \ PORT_1(306, fn, pfx##306, sfx), PORT_1(307, fn, pfx##307, sfx), \ PORT_1(308, fn, pfx##308, sfx), \ /* Port320 - Port329 */ \ PORT_10(320, fn, pfx##32, sfx) enum { PINMUX_RESERVED = 0, /* PORT0_DATA -> PORT329_DATA */ PINMUX_DATA_BEGIN, PORT_ALL(DATA), PINMUX_DATA_END, /* PORT0_IN -> PORT329_IN */ PINMUX_INPUT_BEGIN, PORT_ALL(IN), PINMUX_INPUT_END, /* PORT0_OUT -> PORT329_OUT */ PINMUX_OUTPUT_BEGIN, PORT_ALL(OUT), PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT329_FN_IN */ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT329_FN_OUT */ PORT_ALL(FN0), /* PORT0_FN0 -> PORT329_FN0 */ PORT_ALL(FN1), /* PORT0_FN1 -> PORT329_FN1 */ PORT_ALL(FN2), /* PORT0_FN2 -> PORT329_FN2 */ PORT_ALL(FN3), /* PORT0_FN3 -> PORT329_FN3 */ PORT_ALL(FN4), /* PORT0_FN4 -> PORT329_FN4 */ PORT_ALL(FN5), /* PORT0_FN5 -> PORT329_FN5 */ PORT_ALL(FN6), /* PORT0_FN6 -> PORT329_FN6 */ PORT_ALL(FN7), /* PORT0_FN7 -> PORT329_FN7 */ MSEL1CR_31_0, MSEL1CR_31_1, MSEL1CR_27_0, MSEL1CR_27_1, MSEL1CR_25_0, MSEL1CR_25_1, MSEL1CR_24_0, MSEL1CR_24_1, MSEL1CR_22_0, MSEL1CR_22_1, MSEL1CR_21_0, MSEL1CR_21_1, MSEL1CR_20_0, MSEL1CR_20_1, MSEL1CR_19_0, MSEL1CR_19_1, MSEL1CR_18_0, MSEL1CR_18_1, MSEL1CR_17_0, MSEL1CR_17_1, MSEL1CR_16_0, MSEL1CR_16_1, MSEL1CR_15_0, MSEL1CR_15_1, MSEL1CR_14_0, MSEL1CR_14_1, MSEL1CR_13_0, MSEL1CR_13_1, MSEL1CR_12_0, MSEL1CR_12_1, MSEL1CR_11_0, MSEL1CR_11_1, MSEL1CR_10_0, MSEL1CR_10_1, MSEL1CR_09_0, MSEL1CR_09_1, MSEL1CR_08_0, MSEL1CR_08_1, MSEL1CR_07_0, MSEL1CR_07_1, MSEL1CR_06_0, MSEL1CR_06_1, MSEL1CR_05_0, MSEL1CR_05_1, MSEL1CR_04_0, MSEL1CR_04_1, MSEL1CR_03_0, MSEL1CR_03_1, MSEL1CR_02_0, MSEL1CR_02_1, MSEL1CR_01_0, MSEL1CR_01_1, MSEL1CR_00_0, MSEL1CR_00_1, MSEL3CR_31_0, MSEL3CR_31_1, MSEL3CR_28_0, MSEL3CR_28_1, MSEL3CR_27_0, MSEL3CR_27_1, MSEL3CR_26_0, MSEL3CR_26_1, MSEL3CR_23_0, MSEL3CR_23_1, MSEL3CR_22_0, MSEL3CR_22_1, MSEL3CR_21_0, MSEL3CR_21_1, MSEL3CR_20_0, MSEL3CR_20_1, MSEL3CR_19_0, MSEL3CR_19_1, MSEL3CR_18_0, MSEL3CR_18_1, MSEL3CR_17_0, MSEL3CR_17_1, MSEL3CR_16_0, MSEL3CR_16_1, MSEL3CR_15_0, MSEL3CR_15_1, MSEL3CR_12_0, MSEL3CR_12_1, MSEL3CR_11_0, MSEL3CR_11_1, MSEL3CR_10_0, MSEL3CR_10_1, MSEL3CR_09_0, MSEL3CR_09_1, MSEL3CR_06_0, MSEL3CR_06_1, MSEL3CR_03_0, MSEL3CR_03_1, MSEL3CR_01_0, MSEL3CR_01_1, MSEL3CR_00_0, MSEL3CR_00_1, MSEL4CR_30_0, MSEL4CR_30_1, MSEL4CR_29_0, MSEL4CR_29_1, MSEL4CR_28_0, MSEL4CR_28_1, MSEL4CR_27_0, MSEL4CR_27_1, MSEL4CR_26_0, MSEL4CR_26_1, MSEL4CR_25_0, MSEL4CR_25_1, MSEL4CR_24_0, MSEL4CR_24_1, MSEL4CR_23_0, MSEL4CR_23_1, MSEL4CR_22_0, MSEL4CR_22_1, MSEL4CR_21_0, MSEL4CR_21_1, MSEL4CR_20_0, MSEL4CR_20_1, MSEL4CR_19_0, MSEL4CR_19_1, MSEL4CR_18_0, MSEL4CR_18_1, 
MSEL4CR_17_0, MSEL4CR_17_1, MSEL4CR_16_0, MSEL4CR_16_1, MSEL4CR_15_0, MSEL4CR_15_1, MSEL4CR_14_0, MSEL4CR_14_1, MSEL4CR_13_0, MSEL4CR_13_1, MSEL4CR_12_0, MSEL4CR_12_1, MSEL4CR_11_0, MSEL4CR_11_1, MSEL4CR_10_0, MSEL4CR_10_1, MSEL4CR_09_0, MSEL4CR_09_1, MSEL4CR_07_0, MSEL4CR_07_1, MSEL4CR_04_0, MSEL4CR_04_1, MSEL4CR_01_0, MSEL4CR_01_1, MSEL5CR_31_0, MSEL5CR_31_1, MSEL5CR_30_0, MSEL5CR_30_1, MSEL5CR_29_0, MSEL5CR_29_1, MSEL5CR_28_0, MSEL5CR_28_1, MSEL5CR_27_0, MSEL5CR_27_1, MSEL5CR_26_0, MSEL5CR_26_1, MSEL5CR_25_0, MSEL5CR_25_1, MSEL5CR_24_0, MSEL5CR_24_1, MSEL5CR_23_0, MSEL5CR_23_1, MSEL5CR_22_0, MSEL5CR_22_1, MSEL5CR_21_0, MSEL5CR_21_1, MSEL5CR_20_0, MSEL5CR_20_1, MSEL5CR_19_0, MSEL5CR_19_1, MSEL5CR_18_0, MSEL5CR_18_1, MSEL5CR_17_0, MSEL5CR_17_1, MSEL5CR_16_0, MSEL5CR_16_1, MSEL5CR_15_0, MSEL5CR_15_1, MSEL5CR_14_0, MSEL5CR_14_1, MSEL5CR_13_0, MSEL5CR_13_1, MSEL5CR_12_0, MSEL5CR_12_1, MSEL5CR_11_0, MSEL5CR_11_1, MSEL5CR_10_0, MSEL5CR_10_1, MSEL5CR_09_0, MSEL5CR_09_1, MSEL5CR_08_0, MSEL5CR_08_1, MSEL5CR_07_0, MSEL5CR_07_1, MSEL5CR_06_0, MSEL5CR_06_1, MSEL8CR_16_0, MSEL8CR_16_1, MSEL8CR_01_0, MSEL8CR_01_1, MSEL8CR_00_0, MSEL8CR_00_1, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, #define F1(a) a##_MARK #define F2(a) a##_MARK #define F3(a) a##_MARK #define F4(a) a##_MARK #define F5(a) a##_MARK #define F6(a) a##_MARK #define F7(a) a##_MARK #define IRQ(a) IRQ##a##_MARK F1(LCDD0), F3(PDM2_CLK_0), F7(DU0_DR0), IRQ(0), /* Port0 */ F1(LCDD1), F3(PDM2_DATA_1), F7(DU0_DR19), IRQ(1), F1(LCDD2), F3(PDM3_CLK_2), F7(DU0_DR2), IRQ(2), F1(LCDD3), F3(PDM3_DATA_3), F7(DU0_DR3), IRQ(3), F1(LCDD4), F3(PDM4_CLK_4), F7(DU0_DR4), IRQ(4), F1(LCDD5), F3(PDM4_DATA_5), F7(DU0_DR5), IRQ(5), F1(LCDD6), F3(PDM0_OUTCLK_6), F7(DU0_DR6), IRQ(6), F1(LCDD7), F3(PDM0_OUTDATA_7), F7(DU0_DR7), IRQ(7), F1(LCDD8), F3(PDM1_OUTCLK_8), F7(DU0_DG0), IRQ(8), F1(LCDD9), F3(PDM1_OUTDATA_9), F7(DU0_DG1), IRQ(9), F1(LCDD10), F3(FSICCK), F7(DU0_DG2), IRQ(10), /* Port10 */ F1(LCDD11), F3(FSICISLD), F7(DU0_DG3), IRQ(11), F1(LCDD12), F3(FSICOMC), F7(DU0_DG4), IRQ(12), F1(LCDD13), F3(FSICOLR), F4(FSICILR), F7(DU0_DG5), IRQ(13), F1(LCDD14), F3(FSICOBT), F4(FSICIBT), F7(DU0_DG6), IRQ(14), F1(LCDD15), F3(FSICOSLD), F7(DU0_DG7), IRQ(15), F1(LCDD16), F4(TPU1TO1), F7(DU0_DB0), F1(LCDD17), F4(SF_IRQ_00), F7(DU0_DB1), F1(LCDD18), F4(SF_IRQ_01), F7(DU0_DB2), F1(LCDD19), F3(SCIFB3_RTS_19), F7(DU0_DB3), F1(LCDD20), F3(SCIFB3_CTS_20), F7(DU0_DB4), /* Port20 */ F1(LCDD21), F3(SCIFB3_TXD_21), F7(DU0_DB5), F1(LCDD22), F3(SCIFB3_RXD_22), F7(DU0_DB6), F1(LCDD23), F3(SCIFB3_SCK_23), F7(DU0_DB7), F1(LCDHSYN), F2(LCDCS), F3(SCIFB1_RTS_24), F7(DU0_EXHSYNC_N_CSYNC_N_HSYNC_N), F1(LCDVSYN), F3(SCIFB1_CTS_25), F7(DU0_EXVSYNC_N_VSYNC_N_CSYNC_N), F1(LCDDCK), F2(LCDWR), F3(SCIFB1_TXD_26), F7(DU0_DOTCLKIN), F1(LCDDISP), F2(LCDRS), F3(SCIFB1_RXD_27), F7(DU0_DOTCLKOUT), F1(LCDRD_N), F3(SCIFB1_SCK_28), F7(DU0_DOTCLKOUTB), F1(LCDLCLK), F4(SF_IRQ_02), F7(DU0_DISP_CSYNC_N_DE), F1(LCDDON), F4(SF_IRQ_03), F7(DU0_ODDF_N_CLAMP), /* Port30 */ F1(SCIFA0_RTS), F5(SIM0_DET), F7(CSCIF0_RTS), /* Port32 */ F1(SCIFA0_CTS), F5(SIM1_DET), F7(CSCIF0_CTS), F1(SCIFA0_SCK), F5(SIM0_PWRON), F7(CSCIF0_SCK), F1(SCIFA1_RTS), F7(CSCIF1_RTS), F1(SCIFA1_CTS), F7(CSCIF1_CTS), F1(SCIFA1_SCK), F7(CSCIF1_SCK), F1(SCIFB0_RTS), F3(TPU0TO1), F4(SCIFB3_RTS_38), F7(CHSCIF0_HRTS), F1(SCIFB0_CTS), F3(TPU0TO2), F4(SCIFB3_CTS_39), F7(CHSCIF0_HCTS), F1(SCIFB0_SCK), F3(TPU0TO3), F4(SCIFB3_SCK_40), F7(CHSCIF0_HSCK), /* Port40 */ F1(PDM0_DATA), /* Port64 */ F1(PDM1_DATA), F1(HSI_RX_WAKE), F2(SCIFB2_CTS_66), F3(MSIOF3_SYNC), 
F5(GenIO4), IRQ(40), F1(HSI_RX_READY), F2(SCIFB1_TXD_67), F5(GIO_OUT3_67), F7(CHSCIF1_HTX), F1(HSI_RX_FLAG), F2(SCIFB2_TXD_68), F3(MSIOF3_TXD), F5(GIO_OUT4_68), F1(HSI_RX_DATA), F2(SCIFB2_RXD_69), F3(MSIOF3_RXD), F5(GIO_OUT5_69), F1(HSI_TX_FLAG), F2(SCIFB1_RTS_70), F5(GIO_OUT1_70), F6(HSIC_TSTCLK0), F7(CHSCIF1_HRTS), /* Port70 */ F1(HSI_TX_DATA), F2(SCIFB1_CTS_71), F5(GIO_OUT2_71), F6(HSIC_TSTCLK1), F7(CHSCIF1_HCTS), F1(HSI_TX_WAKE), F2(SCIFB1_RXD_72), F5(GenIO8), F7(CHSCIF1_HRX), F1(HSI_TX_READY), F2(SCIFB2_RTS_73), F3(MSIOF3_SCK), F5(GIO_OUT0_73), F1(IRDA_OUT), F1(IRDA_IN), F1(IRDA_FIRSEL), F1(TPU0TO0), F1(DIGRFEN), F1(GPS_TIMESTAMP), F1(TXP), /* Port80 */ F1(TXP2), F1(COEX_0), F1(COEX_1), IRQ(19), IRQ(18), /* Port85 */ F1(KEYIN0), /* Port96 */ F1(KEYIN1), F1(KEYIN2), F1(KEYIN3), F1(KEYIN4), /* Port100 */ F1(KEYIN5), F1(KEYIN6), IRQ(41), F1(KEYIN7), IRQ(42), F2(KEYOUT0), F2(KEYOUT1), F2(KEYOUT2), F2(KEYOUT3), F2(KEYOUT4), F2(KEYOUT5), IRQ(43), F2(KEYOUT6), IRQ(44), /* Port110 */ F2(KEYOUT7), F5(RFANAEN), IRQ(45), F1(KEYIN8), F2(KEYOUT8), F4(SF_IRQ_04), IRQ(46), F1(KEYIN9), F2(KEYOUT9), F4(SF_IRQ_05), IRQ(47), F1(KEYIN10), F2(KEYOUT10), F4(SF_IRQ_06), IRQ(48), F1(KEYIN11), F2(KEYOUT11), F4(SF_IRQ_07), IRQ(49), F1(SCIFA0_TXD), F7(CSCIF0_TX), F1(SCIFA0_RXD), F7(CSCIF0_RX), F1(SCIFA1_TXD), F7(CSCIF1_TX), F1(SCIFA1_RXD), F7(CSCIF1_RX), F3(SF_PORT_1_120), F4(SCIFB3_RXD_120), F7(DU0_CDE), /* Port120 */ F3(SF_PORT_0_121), F4(SCIFB3_TXD_121), F1(SCIFB0_TXD), F7(CHSCIF0_HTX), F1(SCIFB0_RXD), F7(CHSCIF0_HRX), F3(ISP_STROBE_124), F1(STP_ISD_0), F2(PDM4_CLK_125), F3(MSIOF2_TXD), F5(SIM0_VOLTSEL0), F1(TS_SDEN), F2(MSIOF7_SYNC), F3(STP_ISEN_1), F1(STP_ISEN_0), F2(PDM1_OUTDATA_128), F3(MSIOF2_SYNC), F5(SIM1_VOLTSEL1), F1(TS_SPSYNC), F2(MSIOF7_RXD), F3(STP_ISSYNC_1), F1(STP_ISSYNC_0), F2(PDM4_DATA_130), F3(MSIOF2_RXD), F5(SIM0_VOLTSEL1), /* Port130 */ F1(STP_OPWM_0), F5(SIM1_PWRON), F1(TS_SCK), F2(MSIOF7_SCK), F3(STP_ISCLK_1), F1(STP_ISCLK_0), F2(PDM1_OUTCLK_133), F3(MSIOF2_SCK), F5(SIM1_VOLTSEL0), F1(TS_SDAT), F2(MSIOF7_TXD), F3(STP_ISD_1), IRQ(20), /* Port160 */ IRQ(21), IRQ(22), IRQ(23), F1(MMCD0_0), F1(MMCD0_1), F1(MMCD0_2), F1(MMCD0_3), F1(MMCD0_4), F1(MMCD0_5), F1(MMCD0_6), /* Port170 */ F1(MMCD0_7), F1(MMCCMD0), F1(MMCCLK0), F1(MMCRST), IRQ(24), IRQ(25), IRQ(26), IRQ(27), F1(A10), F2(MMCD1_7), IRQ(31), /* Port192 */ F1(A9), F2(MMCD1_6), IRQ(32), F1(A8), F2(MMCD1_5), IRQ(33), F1(A7), F2(MMCD1_4), IRQ(34), F1(A6), F2(MMCD1_3), IRQ(35), F1(A5), F2(MMCD1_2), IRQ(36), F1(A4), F2(MMCD1_1), IRQ(37), F1(A3), F2(MMCD1_0), IRQ(38), F1(A2), F2(MMCCMD1), IRQ(39), /* Port200 */ F1(A1), F1(A0), F2(BS), F1(CKO), F2(MMCCLK1), F1(CS0_N), F5(SIM0_GPO1), F1(CS2_N), F5(SIM0_GPO2), F1(CS4_N), F2(VIO_VD), F5(SIM1_GPO0), F1(D15), F5(GIO_OUT15), F1(D14), F5(GIO_OUT14), F1(D13), F5(GIO_OUT13), F1(D12), F5(GIO_OUT12), /* Port210 */ F1(D11), F5(WGM_TXP2), F1(D10), F5(WGM_GPS_TIMEM_ASK_RFCLK), F1(D9), F2(VIO_D9), F5(GIO_OUT9), F1(D8), F2(VIO_D8), F5(GIO_OUT8), F1(D7), F2(VIO_D7), F5(GIO_OUT7), F1(D6), F2(VIO_D6), F5(GIO_OUT6), F1(D5), F2(VIO_D5), F5(GIO_OUT5_217), F1(D4), F2(VIO_D4), F5(GIO_OUT4_218), F1(D3), F2(VIO_D3), F5(GIO_OUT3_219), F1(D2), F2(VIO_D2), F5(GIO_OUT2_220), /* Port220 */ F1(D1), F2(VIO_D1), F5(GIO_OUT1_221), F1(D0), F2(VIO_D0), F5(GIO_OUT0_222), F1(RDWR_224), F2(VIO_HD), F5(SIM1_GPO2), F1(RD_N), F1(WAIT_N), F2(VIO_CLK), F5(SIM1_GPO1), F1(WE0_N), F2(RDWR_227), F1(WE1_N), F5(SIM0_GPO0), F1(PWMO), F2(VIO_CKO1_229), F1(SLIM_CLK), F2(VIO_CKO4_230), /* Port230 */ F1(SLIM_DATA), F2(VIO_CKO5_231), 
F2(VIO_CKO2_232), F4(SF_PORT_0_232), F2(VIO_CKO3_233), F4(SF_PORT_1_233), F1(FSIACK), F2(PDM3_CLK_234), F3(ISP_IRIS1_234), F1(FSIAISLD), F2(PDM3_DATA_235), F1(FSIAOMC), F2(PDM0_OUTCLK_236), F3(ISP_IRIS0_236), F1(FSIAOLR), F2(FSIAILR), F1(FSIAOBT), F2(FSIAIBT), F1(FSIAOSLD), F2(PDM0_OUTDATA_239), F1(FSIBISLD), /* Port240 */ F1(FSIBOLR), F2(FSIBILR), F1(FSIBOMC), F3(ISP_SHUTTER1_242), F1(FSIBOBT), F2(FSIBIBT), F1(FSIBOSLD), F2(FSIASPDIF), F1(FSIBCK), F3(ISP_SHUTTER0_245), F1(ISP_IRIS1_246), F1(ISP_IRIS0_247), F1(ISP_SHUTTER1_248), F1(ISP_SHUTTER0_249), F1(ISP_STROBE_250), /* Port250 */ F1(MSIOF0_SYNC), F1(MSIOF0_RXD), F1(MSIOF0_SCK), F1(MSIOF0_SS2), F3(VIO_CKO3_259), F1(MSIOF0_TXD), /* Port260 */ F2(SCIFB1_SCK_261), F7(CHSCIF1_HSCK), F2(SCIFB2_SCK_262), F1(MSIOF1_SS2), F4(MSIOF5_SS2), F1(MSIOF1_TXD), F4(MSIOF5_TXD), F1(MSIOF1_RXD), F4(MSIOF5_RXD), F1(MSIOF1_SS1), F4(MSIOF5_SS1), F1(MSIOF0_SS1), F1(MSIOF1_SCK), F4(MSIOF5_SCK), F1(MSIOF1_SYNC), F4(MSIOF5_SYNC), F1(MSIOF2_SS1), F3(VIO_CKO5_270), /* Port270 */ F1(MSIOF2_SS2), F3(VIO_CKO2_271), F1(MSIOF3_SS2), F3(VIO_CKO1_272), F1(MSIOF3_SS1), F3(VIO_CKO4_273), F1(MSIOF4_SS2), F4(TPU1TO0), F1(IC_DP), F1(SIM0_RST), F1(IC_DM), F1(SIM0_BSICOMP), F1(SIM0_CLK), F1(SIM0_IO), /* Port280 */ F1(SIM1_IO), F2(PDM2_DATA_281), F1(SIM1_CLK), F2(PDM2_CLK_282), F1(SIM1_RST), F1(SDHID1_0), F3(STMDATA0_2), F1(SDHID1_1), F3(STMDATA1_2), IRQ(51), /* Port290 */ F1(SDHID1_2), F3(STMDATA2_2), F1(SDHID1_3), F3(STMDATA3_2), F1(SDHICLK1), F3(STMCLK_2), F1(SDHICMD1), F3(STMSIDI_2), F1(SDHID2_0), F2(MSIOF4_TXD), F3(SCIFB2_TXD_295), F4(MSIOF6_TXD), F1(SDHID2_1), F4(MSIOF6_SS2), IRQ(52), F1(SDHID2_2), F2(MSIOF4_RXD), F3(SCIFB2_RXD_297), F4(MSIOF6_RXD), F1(SDHID2_3), F2(MSIOF4_SYNC), F3(SCIFB2_CTS_298), F4(MSIOF6_SYNC), F1(SDHICLK2), F2(MSIOF4_SCK), F3(SCIFB2_SCK_299), F4(MSIOF6_SCK), F1(SDHICMD2), F2(MSIOF4_SS1), F3(SCIFB2_RTS_300), F4(MSIOF6_SS1), /* Port300 */ F1(SDHICD0), IRQ(50), F1(SDHID0_0), F3(STMDATA0_1), F1(SDHID0_1), F3(STMDATA1_1), F1(SDHID0_2), F3(STMDATA2_1), F1(SDHID0_3), F3(STMDATA3_1), F1(SDHICMD0), F3(STMSIDI_1), F1(SDHIWP0), F1(SDHICLK0), F3(STMCLK_1), IRQ(16), /* Port320 */ IRQ(17), IRQ(28), IRQ(29), IRQ(30), IRQ(53), IRQ(54), IRQ(55), IRQ(56), IRQ(57), PINMUX_MARK_END, }; static const u16 pinmux_data[] = { /* specify valid pin states for each pin in GPIO mode */ PINMUX_DATA_ALL(), /* Port0 */ PINMUX_DATA(LCDD0_MARK, PORT0_FN1), PINMUX_DATA(PDM2_CLK_0_MARK, PORT0_FN3), PINMUX_DATA(DU0_DR0_MARK, PORT0_FN7), PINMUX_DATA(IRQ0_MARK, PORT0_FN0), /* Port1 */ PINMUX_DATA(LCDD1_MARK, PORT1_FN1), PINMUX_DATA(PDM2_DATA_1_MARK, PORT1_FN3, MSEL3CR_12_0), PINMUX_DATA(DU0_DR19_MARK, PORT1_FN7), PINMUX_DATA(IRQ1_MARK, PORT1_FN0), /* Port2 */ PINMUX_DATA(LCDD2_MARK, PORT2_FN1), PINMUX_DATA(PDM3_CLK_2_MARK, PORT2_FN3), PINMUX_DATA(DU0_DR2_MARK, PORT2_FN7), PINMUX_DATA(IRQ2_MARK, PORT2_FN0), /* Port3 */ PINMUX_DATA(LCDD3_MARK, PORT3_FN1), PINMUX_DATA(PDM3_DATA_3_MARK, PORT3_FN3, MSEL3CR_12_0), PINMUX_DATA(DU0_DR3_MARK, PORT3_FN7), PINMUX_DATA(IRQ3_MARK, PORT3_FN0), /* Port4 */ PINMUX_DATA(LCDD4_MARK, PORT4_FN1), PINMUX_DATA(PDM4_CLK_4_MARK, PORT4_FN3), PINMUX_DATA(DU0_DR4_MARK, PORT4_FN7), PINMUX_DATA(IRQ4_MARK, PORT4_FN0), /* Port5 */ PINMUX_DATA(LCDD5_MARK, PORT5_FN1), PINMUX_DATA(PDM4_DATA_5_MARK, PORT5_FN3, MSEL3CR_12_0), PINMUX_DATA(DU0_DR5_MARK, PORT5_FN7), PINMUX_DATA(IRQ5_MARK, PORT5_FN0), /* Port6 */ PINMUX_DATA(LCDD6_MARK, PORT6_FN1), PINMUX_DATA(PDM0_OUTCLK_6_MARK, PORT6_FN3), PINMUX_DATA(DU0_DR6_MARK, PORT6_FN7), PINMUX_DATA(IRQ6_MARK, PORT6_FN0), /* Port7 */ 
PINMUX_DATA(LCDD7_MARK, PORT7_FN1), PINMUX_DATA(PDM0_OUTDATA_7_MARK, PORT7_FN3), PINMUX_DATA(DU0_DR7_MARK, PORT7_FN7), PINMUX_DATA(IRQ7_MARK, PORT7_FN0), /* Port8 */ PINMUX_DATA(LCDD8_MARK, PORT8_FN1), PINMUX_DATA(PDM1_OUTCLK_8_MARK, PORT8_FN3), PINMUX_DATA(DU0_DG0_MARK, PORT8_FN7), PINMUX_DATA(IRQ8_MARK, PORT8_FN0), /* Port9 */ PINMUX_DATA(LCDD9_MARK, PORT9_FN1), PINMUX_DATA(PDM1_OUTDATA_9_MARK, PORT9_FN3), PINMUX_DATA(DU0_DG1_MARK, PORT9_FN7), PINMUX_DATA(IRQ9_MARK, PORT9_FN0), /* Port10 */ PINMUX_DATA(LCDD10_MARK, PORT10_FN1), PINMUX_DATA(FSICCK_MARK, PORT10_FN3), PINMUX_DATA(DU0_DG2_MARK, PORT10_FN7), PINMUX_DATA(IRQ10_MARK, PORT10_FN0), /* Port11 */ PINMUX_DATA(LCDD11_MARK, PORT11_FN1), PINMUX_DATA(FSICISLD_MARK, PORT11_FN3), PINMUX_DATA(DU0_DG3_MARK, PORT11_FN7), PINMUX_DATA(IRQ11_MARK, PORT11_FN0), /* Port12 */ PINMUX_DATA(LCDD12_MARK, PORT12_FN1), PINMUX_DATA(FSICOMC_MARK, PORT12_FN3), PINMUX_DATA(DU0_DG4_MARK, PORT12_FN7), PINMUX_DATA(IRQ12_MARK, PORT12_FN0), /* Port13 */ PINMUX_DATA(LCDD13_MARK, PORT13_FN1), PINMUX_DATA(FSICOLR_MARK, PORT13_FN3), PINMUX_DATA(FSICILR_MARK, PORT13_FN4), PINMUX_DATA(DU0_DG5_MARK, PORT13_FN7), PINMUX_DATA(IRQ13_MARK, PORT13_FN0), /* Port14 */ PINMUX_DATA(LCDD14_MARK, PORT14_FN1), PINMUX_DATA(FSICOBT_MARK, PORT14_FN3), PINMUX_DATA(FSICIBT_MARK, PORT14_FN4), PINMUX_DATA(DU0_DG6_MARK, PORT14_FN7), PINMUX_DATA(IRQ14_MARK, PORT14_FN0), /* Port15 */ PINMUX_DATA(LCDD15_MARK, PORT15_FN1), PINMUX_DATA(FSICOSLD_MARK, PORT15_FN3), PINMUX_DATA(DU0_DG7_MARK, PORT15_FN7), PINMUX_DATA(IRQ15_MARK, PORT15_FN0), /* Port16 */ PINMUX_DATA(LCDD16_MARK, PORT16_FN1), PINMUX_DATA(TPU1TO1_MARK, PORT16_FN4), PINMUX_DATA(DU0_DB0_MARK, PORT16_FN7), /* Port17 */ PINMUX_DATA(LCDD17_MARK, PORT17_FN1), PINMUX_DATA(SF_IRQ_00_MARK, PORT17_FN4), PINMUX_DATA(DU0_DB1_MARK, PORT17_FN7), /* Port18 */ PINMUX_DATA(LCDD18_MARK, PORT18_FN1), PINMUX_DATA(SF_IRQ_01_MARK, PORT18_FN4), PINMUX_DATA(DU0_DB2_MARK, PORT18_FN7), /* Port19 */ PINMUX_DATA(LCDD19_MARK, PORT19_FN1), PINMUX_DATA(SCIFB3_RTS_19_MARK, PORT19_FN3), PINMUX_DATA(DU0_DB3_MARK, PORT19_FN7), /* Port20 */ PINMUX_DATA(LCDD20_MARK, PORT20_FN1), PINMUX_DATA(SCIFB3_CTS_20_MARK, PORT20_FN3, MSEL3CR_09_0), PINMUX_DATA(DU0_DB4_MARK, PORT20_FN7), /* Port21 */ PINMUX_DATA(LCDD21_MARK, PORT21_FN1), PINMUX_DATA(SCIFB3_TXD_21_MARK, PORT21_FN3, MSEL3CR_09_0), PINMUX_DATA(DU0_DB5_MARK, PORT21_FN7), /* Port22 */ PINMUX_DATA(LCDD22_MARK, PORT22_FN1), PINMUX_DATA(SCIFB3_RXD_22_MARK, PORT22_FN3, MSEL3CR_09_0), PINMUX_DATA(DU0_DB6_MARK, PORT22_FN7), /* Port23 */ PINMUX_DATA(LCDD23_MARK, PORT23_FN1), PINMUX_DATA(SCIFB3_SCK_23_MARK, PORT23_FN3), PINMUX_DATA(DU0_DB7_MARK, PORT23_FN7), /* Port24 */ PINMUX_DATA(LCDHSYN_MARK, PORT24_FN1), PINMUX_DATA(LCDCS_MARK, PORT24_FN2), PINMUX_DATA(SCIFB1_RTS_24_MARK, PORT24_FN3), PINMUX_DATA(DU0_EXHSYNC_N_CSYNC_N_HSYNC_N_MARK, PORT24_FN7), /* Port25 */ PINMUX_DATA(LCDVSYN_MARK, PORT25_FN1), PINMUX_DATA(SCIFB1_CTS_25_MARK, PORT25_FN3, MSEL3CR_11_0), PINMUX_DATA(DU0_EXVSYNC_N_VSYNC_N_CSYNC_N_MARK, PORT25_FN7), /* Port26 */ PINMUX_DATA(LCDDCK_MARK, PORT26_FN1), PINMUX_DATA(LCDWR_MARK, PORT26_FN2), PINMUX_DATA(SCIFB1_TXD_26_MARK, PORT26_FN3, MSEL3CR_11_0), PINMUX_DATA(DU0_DOTCLKIN_MARK, PORT26_FN7), /* Port27 */ PINMUX_DATA(LCDDISP_MARK, PORT27_FN1), PINMUX_DATA(LCDRS_MARK, PORT27_FN2), PINMUX_DATA(SCIFB1_RXD_27_MARK, PORT27_FN3, MSEL3CR_11_0), PINMUX_DATA(DU0_DOTCLKOUT_MARK, PORT27_FN7), /* Port28 */ PINMUX_DATA(LCDRD_N_MARK, PORT28_FN1), PINMUX_DATA(SCIFB1_SCK_28_MARK, PORT28_FN3), PINMUX_DATA(DU0_DOTCLKOUTB_MARK, 
PORT28_FN7), /* Port29 */ PINMUX_DATA(LCDLCLK_MARK, PORT29_FN1), PINMUX_DATA(SF_IRQ_02_MARK, PORT29_FN4), PINMUX_DATA(DU0_DISP_CSYNC_N_DE_MARK, PORT29_FN7), /* Port30 */ PINMUX_DATA(LCDDON_MARK, PORT30_FN1), PINMUX_DATA(SF_IRQ_03_MARK, PORT30_FN4), PINMUX_DATA(DU0_ODDF_N_CLAMP_MARK, PORT30_FN7), /* Port32 */ PINMUX_DATA(SCIFA0_RTS_MARK, PORT32_FN1), PINMUX_DATA(SIM0_DET_MARK, PORT32_FN5), PINMUX_DATA(CSCIF0_RTS_MARK, PORT32_FN7), /* Port33 */ PINMUX_DATA(SCIFA0_CTS_MARK, PORT33_FN1), PINMUX_DATA(SIM1_DET_MARK, PORT33_FN5), PINMUX_DATA(CSCIF0_CTS_MARK, PORT33_FN7), /* Port34 */ PINMUX_DATA(SCIFA0_SCK_MARK, PORT34_FN1), PINMUX_DATA(SIM0_PWRON_MARK, PORT34_FN5), PINMUX_DATA(CSCIF0_SCK_MARK, PORT34_FN7), /* Port35 */ PINMUX_DATA(SCIFA1_RTS_MARK, PORT35_FN1), PINMUX_DATA(CSCIF1_RTS_MARK, PORT35_FN7), /* Port36 */ PINMUX_DATA(SCIFA1_CTS_MARK, PORT36_FN1), PINMUX_DATA(CSCIF1_CTS_MARK, PORT36_FN7), /* Port37 */ PINMUX_DATA(SCIFA1_SCK_MARK, PORT37_FN1), PINMUX_DATA(CSCIF1_SCK_MARK, PORT37_FN7), /* Port38 */ PINMUX_DATA(SCIFB0_RTS_MARK, PORT38_FN1), PINMUX_DATA(TPU0TO1_MARK, PORT38_FN3), PINMUX_DATA(SCIFB3_RTS_38_MARK, PORT38_FN4), PINMUX_DATA(CHSCIF0_HRTS_MARK, PORT38_FN7), /* Port39 */ PINMUX_DATA(SCIFB0_CTS_MARK, PORT39_FN1), PINMUX_DATA(TPU0TO2_MARK, PORT39_FN3), PINMUX_DATA(SCIFB3_CTS_39_MARK, PORT39_FN4, MSEL3CR_09_1), PINMUX_DATA(CHSCIF0_HCTS_MARK, PORT39_FN7), /* Port40 */ PINMUX_DATA(SCIFB0_SCK_MARK, PORT40_FN1), PINMUX_DATA(TPU0TO3_MARK, PORT40_FN3), PINMUX_DATA(SCIFB3_SCK_40_MARK, PORT40_FN4), PINMUX_DATA(CHSCIF0_HSCK_MARK, PORT40_FN7), /* Port64 */ PINMUX_DATA(PDM0_DATA_MARK, PORT64_FN1), /* Port65 */ PINMUX_DATA(PDM1_DATA_MARK, PORT65_FN1), /* Port66 */ PINMUX_DATA(HSI_RX_WAKE_MARK, PORT66_FN1), PINMUX_DATA(SCIFB2_CTS_66_MARK, PORT66_FN2, MSEL3CR_10_0), PINMUX_DATA(MSIOF3_SYNC_MARK, PORT66_FN3), PINMUX_DATA(GenIO4_MARK, PORT66_FN5), PINMUX_DATA(IRQ40_MARK, PORT66_FN0), /* Port67 */ PINMUX_DATA(HSI_RX_READY_MARK, PORT67_FN1), PINMUX_DATA(SCIFB1_TXD_67_MARK, PORT67_FN2, MSEL3CR_11_1), PINMUX_DATA(GIO_OUT3_67_MARK, PORT67_FN5), PINMUX_DATA(CHSCIF1_HTX_MARK, PORT67_FN7), /* Port68 */ PINMUX_DATA(HSI_RX_FLAG_MARK, PORT68_FN1), PINMUX_DATA(SCIFB2_TXD_68_MARK, PORT68_FN2, MSEL3CR_10_0), PINMUX_DATA(MSIOF3_TXD_MARK, PORT68_FN3), PINMUX_DATA(GIO_OUT4_68_MARK, PORT68_FN5), /* Port69 */ PINMUX_DATA(HSI_RX_DATA_MARK, PORT69_FN1), PINMUX_DATA(SCIFB2_RXD_69_MARK, PORT69_FN2, MSEL3CR_10_0), PINMUX_DATA(MSIOF3_RXD_MARK, PORT69_FN3), PINMUX_DATA(GIO_OUT5_69_MARK, PORT69_FN5), /* Port70 */ PINMUX_DATA(HSI_TX_FLAG_MARK, PORT70_FN1), PINMUX_DATA(SCIFB1_RTS_70_MARK, PORT70_FN2), PINMUX_DATA(GIO_OUT1_70_MARK, PORT70_FN5), PINMUX_DATA(HSIC_TSTCLK0_MARK, PORT70_FN6), PINMUX_DATA(CHSCIF1_HRTS_MARK, PORT70_FN7), /* Port71 */ PINMUX_DATA(HSI_TX_DATA_MARK, PORT71_FN1), PINMUX_DATA(SCIFB1_CTS_71_MARK, PORT71_FN2, MSEL3CR_11_1), PINMUX_DATA(GIO_OUT2_71_MARK, PORT71_FN5), PINMUX_DATA(HSIC_TSTCLK1_MARK, PORT71_FN6), PINMUX_DATA(CHSCIF1_HCTS_MARK, PORT71_FN7), /* Port72 */ PINMUX_DATA(HSI_TX_WAKE_MARK, PORT72_FN1), PINMUX_DATA(SCIFB1_RXD_72_MARK, PORT72_FN2, MSEL3CR_11_1), PINMUX_DATA(GenIO8_MARK, PORT72_FN5), PINMUX_DATA(CHSCIF1_HRX_MARK, PORT72_FN7), /* Port73 */ PINMUX_DATA(HSI_TX_READY_MARK, PORT73_FN1), PINMUX_DATA(SCIFB2_RTS_73_MARK, PORT73_FN2), PINMUX_DATA(MSIOF3_SCK_MARK, PORT73_FN3), PINMUX_DATA(GIO_OUT0_73_MARK, PORT73_FN5), /* Port74 - Port85 */ PINMUX_DATA(IRDA_OUT_MARK, PORT74_FN1), PINMUX_DATA(IRDA_IN_MARK, PORT75_FN1), PINMUX_DATA(IRDA_FIRSEL_MARK, PORT76_FN1), PINMUX_DATA(TPU0TO0_MARK, PORT77_FN1), 
PINMUX_DATA(DIGRFEN_MARK, PORT78_FN1), PINMUX_DATA(GPS_TIMESTAMP_MARK, PORT79_FN1), PINMUX_DATA(TXP_MARK, PORT80_FN1), PINMUX_DATA(TXP2_MARK, PORT81_FN1), PINMUX_DATA(COEX_0_MARK, PORT82_FN1), PINMUX_DATA(COEX_1_MARK, PORT83_FN1), PINMUX_DATA(IRQ19_MARK, PORT84_FN0), PINMUX_DATA(IRQ18_MARK, PORT85_FN0), /* Port96 - Port101 */ PINMUX_DATA(KEYIN0_MARK, PORT96_FN1), PINMUX_DATA(KEYIN1_MARK, PORT97_FN1), PINMUX_DATA(KEYIN2_MARK, PORT98_FN1), PINMUX_DATA(KEYIN3_MARK, PORT99_FN1), PINMUX_DATA(KEYIN4_MARK, PORT100_FN1), PINMUX_DATA(KEYIN5_MARK, PORT101_FN1), /* Port102 */ PINMUX_DATA(KEYIN6_MARK, PORT102_FN1), PINMUX_DATA(IRQ41_MARK, PORT102_FN0), /* Port103 */ PINMUX_DATA(KEYIN7_MARK, PORT103_FN1), PINMUX_DATA(IRQ42_MARK, PORT103_FN0), /* Port104 - Port108 */ PINMUX_DATA(KEYOUT0_MARK, PORT104_FN2), PINMUX_DATA(KEYOUT1_MARK, PORT105_FN2), PINMUX_DATA(KEYOUT2_MARK, PORT106_FN2), PINMUX_DATA(KEYOUT3_MARK, PORT107_FN2), PINMUX_DATA(KEYOUT4_MARK, PORT108_FN2), /* Port109 */ PINMUX_DATA(KEYOUT5_MARK, PORT109_FN2), PINMUX_DATA(IRQ43_MARK, PORT109_FN0), /* Port110 */ PINMUX_DATA(KEYOUT6_MARK, PORT110_FN2), PINMUX_DATA(IRQ44_MARK, PORT110_FN0), /* Port111 */ PINMUX_DATA(KEYOUT7_MARK, PORT111_FN2), PINMUX_DATA(RFANAEN_MARK, PORT111_FN5), PINMUX_DATA(IRQ45_MARK, PORT111_FN0), /* Port112 */ PINMUX_DATA(KEYIN8_MARK, PORT112_FN1), PINMUX_DATA(KEYOUT8_MARK, PORT112_FN2), PINMUX_DATA(SF_IRQ_04_MARK, PORT112_FN4), PINMUX_DATA(IRQ46_MARK, PORT112_FN0), /* Port113 */ PINMUX_DATA(KEYIN9_MARK, PORT113_FN1), PINMUX_DATA(KEYOUT9_MARK, PORT113_FN2), PINMUX_DATA(SF_IRQ_05_MARK, PORT113_FN4), PINMUX_DATA(IRQ47_MARK, PORT113_FN0), /* Port114 */ PINMUX_DATA(KEYIN10_MARK, PORT114_FN1), PINMUX_DATA(KEYOUT10_MARK, PORT114_FN2), PINMUX_DATA(SF_IRQ_06_MARK, PORT114_FN4), PINMUX_DATA(IRQ48_MARK, PORT114_FN0), /* Port115 */ PINMUX_DATA(KEYIN11_MARK, PORT115_FN1), PINMUX_DATA(KEYOUT11_MARK, PORT115_FN2), PINMUX_DATA(SF_IRQ_07_MARK, PORT115_FN4), PINMUX_DATA(IRQ49_MARK, PORT115_FN0), /* Port116 */ PINMUX_DATA(SCIFA0_TXD_MARK, PORT116_FN1), PINMUX_DATA(CSCIF0_TX_MARK, PORT116_FN7), /* Port117 */ PINMUX_DATA(SCIFA0_RXD_MARK, PORT117_FN1), PINMUX_DATA(CSCIF0_RX_MARK, PORT117_FN7), /* Port118 */ PINMUX_DATA(SCIFA1_TXD_MARK, PORT118_FN1), PINMUX_DATA(CSCIF1_TX_MARK, PORT118_FN7), /* Port119 */ PINMUX_DATA(SCIFA1_RXD_MARK, PORT119_FN1), PINMUX_DATA(CSCIF1_RX_MARK, PORT119_FN7), /* Port120 */ PINMUX_DATA(SF_PORT_1_120_MARK, PORT120_FN3), PINMUX_DATA(SCIFB3_RXD_120_MARK, PORT120_FN4, MSEL3CR_09_1), PINMUX_DATA(DU0_CDE_MARK, PORT120_FN7), /* Port121 */ PINMUX_DATA(SF_PORT_0_121_MARK, PORT121_FN3), PINMUX_DATA(SCIFB3_TXD_121_MARK, PORT121_FN4, MSEL3CR_09_1), /* Port122 */ PINMUX_DATA(SCIFB0_TXD_MARK, PORT122_FN1), PINMUX_DATA(CHSCIF0_HTX_MARK, PORT122_FN7), /* Port123 */ PINMUX_DATA(SCIFB0_RXD_MARK, PORT123_FN1), PINMUX_DATA(CHSCIF0_HRX_MARK, PORT123_FN7), /* Port124 */ PINMUX_DATA(ISP_STROBE_124_MARK, PORT124_FN3), /* Port125 */ PINMUX_DATA(STP_ISD_0_MARK, PORT125_FN1), PINMUX_DATA(PDM4_CLK_125_MARK, PORT125_FN2), PINMUX_DATA(MSIOF2_TXD_MARK, PORT125_FN3), PINMUX_DATA(SIM0_VOLTSEL0_MARK, PORT125_FN5), /* Port126 */ PINMUX_DATA(TS_SDEN_MARK, PORT126_FN1), PINMUX_DATA(MSIOF7_SYNC_MARK, PORT126_FN2), PINMUX_DATA(STP_ISEN_1_MARK, PORT126_FN3), /* Port128 */ PINMUX_DATA(STP_ISEN_0_MARK, PORT128_FN1), PINMUX_DATA(PDM1_OUTDATA_128_MARK, PORT128_FN2), PINMUX_DATA(MSIOF2_SYNC_MARK, PORT128_FN3), PINMUX_DATA(SIM1_VOLTSEL1_MARK, PORT128_FN5), /* Port129 */ PINMUX_DATA(TS_SPSYNC_MARK, PORT129_FN1), PINMUX_DATA(MSIOF7_RXD_MARK, PORT129_FN2), 
PINMUX_DATA(STP_ISSYNC_1_MARK, PORT129_FN3), /* Port130 */ PINMUX_DATA(STP_ISSYNC_0_MARK, PORT130_FN1), PINMUX_DATA(PDM4_DATA_130_MARK, PORT130_FN2, MSEL3CR_12_1), PINMUX_DATA(MSIOF2_RXD_MARK, PORT130_FN3), PINMUX_DATA(SIM0_VOLTSEL1_MARK, PORT130_FN5), /* Port131 */ PINMUX_DATA(STP_OPWM_0_MARK, PORT131_FN1), PINMUX_DATA(SIM1_PWRON_MARK, PORT131_FN5), /* Port132 */ PINMUX_DATA(TS_SCK_MARK, PORT132_FN1), PINMUX_DATA(MSIOF7_SCK_MARK, PORT132_FN2), PINMUX_DATA(STP_ISCLK_1_MARK, PORT132_FN3), /* Port133 */ PINMUX_DATA(STP_ISCLK_0_MARK, PORT133_FN1), PINMUX_DATA(PDM1_OUTCLK_133_MARK, PORT133_FN2), PINMUX_DATA(MSIOF2_SCK_MARK, PORT133_FN3), PINMUX_DATA(SIM1_VOLTSEL0_MARK, PORT133_FN5), /* Port134 */ PINMUX_DATA(TS_SDAT_MARK, PORT134_FN1), PINMUX_DATA(MSIOF7_TXD_MARK, PORT134_FN2), PINMUX_DATA(STP_ISD_1_MARK, PORT134_FN3), /* Port160 - Port178 */ PINMUX_DATA(IRQ20_MARK, PORT160_FN0), PINMUX_DATA(IRQ21_MARK, PORT161_FN0), PINMUX_DATA(IRQ22_MARK, PORT162_FN0), PINMUX_DATA(IRQ23_MARK, PORT163_FN0), PINMUX_DATA(MMCD0_0_MARK, PORT164_FN1), PINMUX_DATA(MMCD0_1_MARK, PORT165_FN1), PINMUX_DATA(MMCD0_2_MARK, PORT166_FN1), PINMUX_DATA(MMCD0_3_MARK, PORT167_FN1), PINMUX_DATA(MMCD0_4_MARK, PORT168_FN1), PINMUX_DATA(MMCD0_5_MARK, PORT169_FN1), PINMUX_DATA(MMCD0_6_MARK, PORT170_FN1), PINMUX_DATA(MMCD0_7_MARK, PORT171_FN1), PINMUX_DATA(MMCCMD0_MARK, PORT172_FN1), PINMUX_DATA(MMCCLK0_MARK, PORT173_FN1), PINMUX_DATA(MMCRST_MARK, PORT174_FN1), PINMUX_DATA(IRQ24_MARK, PORT175_FN0), PINMUX_DATA(IRQ25_MARK, PORT176_FN0), PINMUX_DATA(IRQ26_MARK, PORT177_FN0), PINMUX_DATA(IRQ27_MARK, PORT178_FN0), /* Port192 - Port200 FN1 */ PINMUX_DATA(A10_MARK, PORT192_FN1), PINMUX_DATA(A9_MARK, PORT193_FN1), PINMUX_DATA(A8_MARK, PORT194_FN1), PINMUX_DATA(A7_MARK, PORT195_FN1), PINMUX_DATA(A6_MARK, PORT196_FN1), PINMUX_DATA(A5_MARK, PORT197_FN1), PINMUX_DATA(A4_MARK, PORT198_FN1), PINMUX_DATA(A3_MARK, PORT199_FN1), PINMUX_DATA(A2_MARK, PORT200_FN1), /* Port192 - Port200 FN2 */ PINMUX_DATA(MMCD1_7_MARK, PORT192_FN2), PINMUX_DATA(MMCD1_6_MARK, PORT193_FN2), PINMUX_DATA(MMCD1_5_MARK, PORT194_FN2), PINMUX_DATA(MMCD1_4_MARK, PORT195_FN2), PINMUX_DATA(MMCD1_3_MARK, PORT196_FN2), PINMUX_DATA(MMCD1_2_MARK, PORT197_FN2), PINMUX_DATA(MMCD1_1_MARK, PORT198_FN2), PINMUX_DATA(MMCD1_0_MARK, PORT199_FN2), PINMUX_DATA(MMCCMD1_MARK, PORT200_FN2), /* Port192 - Port200 IRQ */ PINMUX_DATA(IRQ31_MARK, PORT192_FN0), PINMUX_DATA(IRQ32_MARK, PORT193_FN0), PINMUX_DATA(IRQ33_MARK, PORT194_FN0), PINMUX_DATA(IRQ34_MARK, PORT195_FN0), PINMUX_DATA(IRQ35_MARK, PORT196_FN0), PINMUX_DATA(IRQ36_MARK, PORT197_FN0), PINMUX_DATA(IRQ37_MARK, PORT198_FN0), PINMUX_DATA(IRQ38_MARK, PORT199_FN0), PINMUX_DATA(IRQ39_MARK, PORT200_FN0), /* Port201 */ PINMUX_DATA(A1_MARK, PORT201_FN1), /* Port202 */ PINMUX_DATA(A0_MARK, PORT202_FN1), PINMUX_DATA(BS_MARK, PORT202_FN2), /* Port203 */ PINMUX_DATA(CKO_MARK, PORT203_FN1), PINMUX_DATA(MMCCLK1_MARK, PORT203_FN2), /* Port204 */ PINMUX_DATA(CS0_N_MARK, PORT204_FN1), PINMUX_DATA(SIM0_GPO1_MARK, PORT204_FN5), /* Port205 */ PINMUX_DATA(CS2_N_MARK, PORT205_FN1), PINMUX_DATA(SIM0_GPO2_MARK, PORT205_FN5), /* Port206 */ PINMUX_DATA(CS4_N_MARK, PORT206_FN1), PINMUX_DATA(VIO_VD_MARK, PORT206_FN2), PINMUX_DATA(SIM1_GPO0_MARK, PORT206_FN5), /* Port207 - Port212 FN1 */ PINMUX_DATA(D15_MARK, PORT207_FN1), PINMUX_DATA(D14_MARK, PORT208_FN1), PINMUX_DATA(D13_MARK, PORT209_FN1), PINMUX_DATA(D12_MARK, PORT210_FN1), PINMUX_DATA(D11_MARK, PORT211_FN1), PINMUX_DATA(D10_MARK, PORT212_FN1), /* Port207 - Port212 FN5 */ PINMUX_DATA(GIO_OUT15_MARK, PORT207_FN5), 
PINMUX_DATA(GIO_OUT14_MARK, PORT208_FN5), PINMUX_DATA(GIO_OUT13_MARK, PORT209_FN5), PINMUX_DATA(GIO_OUT12_MARK, PORT210_FN5), PINMUX_DATA(WGM_TXP2_MARK, PORT211_FN5), PINMUX_DATA(WGM_GPS_TIMEM_ASK_RFCLK_MARK, PORT212_FN5), /* Port213 - Port222 FN1 */ PINMUX_DATA(D9_MARK, PORT213_FN1), PINMUX_DATA(D8_MARK, PORT214_FN1), PINMUX_DATA(D7_MARK, PORT215_FN1), PINMUX_DATA(D6_MARK, PORT216_FN1), PINMUX_DATA(D5_MARK, PORT217_FN1), PINMUX_DATA(D4_MARK, PORT218_FN1), PINMUX_DATA(D3_MARK, PORT219_FN1), PINMUX_DATA(D2_MARK, PORT220_FN1), PINMUX_DATA(D1_MARK, PORT221_FN1), PINMUX_DATA(D0_MARK, PORT222_FN1), /* Port213 - Port222 FN2 */ PINMUX_DATA(VIO_D9_MARK, PORT213_FN2), PINMUX_DATA(VIO_D8_MARK, PORT214_FN2), PINMUX_DATA(VIO_D7_MARK, PORT215_FN2), PINMUX_DATA(VIO_D6_MARK, PORT216_FN2), PINMUX_DATA(VIO_D5_MARK, PORT217_FN2), PINMUX_DATA(VIO_D4_MARK, PORT218_FN2), PINMUX_DATA(VIO_D3_MARK, PORT219_FN2), PINMUX_DATA(VIO_D2_MARK, PORT220_FN2), PINMUX_DATA(VIO_D1_MARK, PORT221_FN2), PINMUX_DATA(VIO_D0_MARK, PORT222_FN2), /* Port213 - Port222 FN5 */ PINMUX_DATA(GIO_OUT9_MARK, PORT213_FN5), PINMUX_DATA(GIO_OUT8_MARK, PORT214_FN5), PINMUX_DATA(GIO_OUT7_MARK, PORT215_FN5), PINMUX_DATA(GIO_OUT6_MARK, PORT216_FN5), PINMUX_DATA(GIO_OUT5_217_MARK, PORT217_FN5), PINMUX_DATA(GIO_OUT4_218_MARK, PORT218_FN5), PINMUX_DATA(GIO_OUT3_219_MARK, PORT219_FN5), PINMUX_DATA(GIO_OUT2_220_MARK, PORT220_FN5), PINMUX_DATA(GIO_OUT1_221_MARK, PORT221_FN5), PINMUX_DATA(GIO_OUT0_222_MARK, PORT222_FN5), /* Port224 */ PINMUX_DATA(RDWR_224_MARK, PORT224_FN1), PINMUX_DATA(VIO_HD_MARK, PORT224_FN2), PINMUX_DATA(SIM1_GPO2_MARK, PORT224_FN5), /* Port225 */ PINMUX_DATA(RD_N_MARK, PORT225_FN1), /* Port226 */ PINMUX_DATA(WAIT_N_MARK, PORT226_FN1), PINMUX_DATA(VIO_CLK_MARK, PORT226_FN2), PINMUX_DATA(SIM1_GPO1_MARK, PORT226_FN5), /* Port227 */ PINMUX_DATA(WE0_N_MARK, PORT227_FN1), PINMUX_DATA(RDWR_227_MARK, PORT227_FN2), /* Port228 */ PINMUX_DATA(WE1_N_MARK, PORT228_FN1), PINMUX_DATA(SIM0_GPO0_MARK, PORT228_FN5), /* Port229 */ PINMUX_DATA(PWMO_MARK, PORT229_FN1), PINMUX_DATA(VIO_CKO1_229_MARK, PORT229_FN2), /* Port230 */ PINMUX_DATA(SLIM_CLK_MARK, PORT230_FN1), PINMUX_DATA(VIO_CKO4_230_MARK, PORT230_FN2), /* Port231 */ PINMUX_DATA(SLIM_DATA_MARK, PORT231_FN1), PINMUX_DATA(VIO_CKO5_231_MARK, PORT231_FN2), /* Port232 */ PINMUX_DATA(VIO_CKO2_232_MARK, PORT232_FN2), PINMUX_DATA(SF_PORT_0_232_MARK, PORT232_FN4), /* Port233 */ PINMUX_DATA(VIO_CKO3_233_MARK, PORT233_FN2), PINMUX_DATA(SF_PORT_1_233_MARK, PORT233_FN4), /* Port234 */ PINMUX_DATA(FSIACK_MARK, PORT234_FN1), PINMUX_DATA(PDM3_CLK_234_MARK, PORT234_FN2), PINMUX_DATA(ISP_IRIS1_234_MARK, PORT234_FN3), /* Port235 */ PINMUX_DATA(FSIAISLD_MARK, PORT235_FN1), PINMUX_DATA(PDM3_DATA_235_MARK, PORT235_FN2, MSEL3CR_12_1), /* Port236 */ PINMUX_DATA(FSIAOMC_MARK, PORT236_FN1), PINMUX_DATA(PDM0_OUTCLK_236_MARK, PORT236_FN2), PINMUX_DATA(ISP_IRIS0_236_MARK, PORT236_FN3), /* Port237 */ PINMUX_DATA(FSIAOLR_MARK, PORT237_FN1), PINMUX_DATA(FSIAILR_MARK, PORT237_FN2), /* Port238 */ PINMUX_DATA(FSIAOBT_MARK, PORT238_FN1), PINMUX_DATA(FSIAIBT_MARK, PORT238_FN2), /* Port239 */ PINMUX_DATA(FSIAOSLD_MARK, PORT239_FN1), PINMUX_DATA(PDM0_OUTDATA_239_MARK, PORT239_FN2), /* Port240 */ PINMUX_DATA(FSIBISLD_MARK, PORT240_FN1), /* Port241 */ PINMUX_DATA(FSIBOLR_MARK, PORT241_FN1), PINMUX_DATA(FSIBILR_MARK, PORT241_FN2), /* Port242 */ PINMUX_DATA(FSIBOMC_MARK, PORT242_FN1), PINMUX_DATA(ISP_SHUTTER1_242_MARK, PORT242_FN3), /* Port243 */ PINMUX_DATA(FSIBOBT_MARK, PORT243_FN1), PINMUX_DATA(FSIBIBT_MARK, PORT243_FN2), /* Port244 
*/ PINMUX_DATA(FSIBOSLD_MARK, PORT244_FN1), PINMUX_DATA(FSIASPDIF_MARK, PORT244_FN2), /* Port245 */ PINMUX_DATA(FSIBCK_MARK, PORT245_FN1), PINMUX_DATA(ISP_SHUTTER0_245_MARK, PORT245_FN3), /* Port246 - Port250 FN1 */ PINMUX_DATA(ISP_IRIS1_246_MARK, PORT246_FN1), PINMUX_DATA(ISP_IRIS0_247_MARK, PORT247_FN1), PINMUX_DATA(ISP_SHUTTER1_248_MARK, PORT248_FN1), PINMUX_DATA(ISP_SHUTTER0_249_MARK, PORT249_FN1), PINMUX_DATA(ISP_STROBE_250_MARK, PORT250_FN1), /* Port256 - Port258 */ PINMUX_DATA(MSIOF0_SYNC_MARK, PORT256_FN1), PINMUX_DATA(MSIOF0_RXD_MARK, PORT257_FN1), PINMUX_DATA(MSIOF0_SCK_MARK, PORT258_FN1), /* Port259 */ PINMUX_DATA(MSIOF0_SS2_MARK, PORT259_FN1), PINMUX_DATA(VIO_CKO3_259_MARK, PORT259_FN3), /* Port260 */ PINMUX_DATA(MSIOF0_TXD_MARK, PORT260_FN1), /* Port261 */ PINMUX_DATA(SCIFB1_SCK_261_MARK, PORT261_FN2), PINMUX_DATA(CHSCIF1_HSCK_MARK, PORT261_FN7), /* Port262 */ PINMUX_DATA(SCIFB2_SCK_262_MARK, PORT262_FN2), /* Port263 - Port266 FN1 */ PINMUX_DATA(MSIOF1_SS2_MARK, PORT263_FN1), PINMUX_DATA(MSIOF1_TXD_MARK, PORT264_FN1), PINMUX_DATA(MSIOF1_RXD_MARK, PORT265_FN1), PINMUX_DATA(MSIOF1_SS1_MARK, PORT266_FN1), /* Port263 - Port266 FN4 */ PINMUX_DATA(MSIOF5_SS2_MARK, PORT263_FN4), PINMUX_DATA(MSIOF5_TXD_MARK, PORT264_FN4), PINMUX_DATA(MSIOF5_RXD_MARK, PORT265_FN4), PINMUX_DATA(MSIOF5_SS1_MARK, PORT266_FN4), /* Port267 */ PINMUX_DATA(MSIOF0_SS1_MARK, PORT267_FN1), /* Port268 */ PINMUX_DATA(MSIOF1_SCK_MARK, PORT268_FN1), PINMUX_DATA(MSIOF5_SCK_MARK, PORT268_FN4), /* Port269 */ PINMUX_DATA(MSIOF1_SYNC_MARK, PORT269_FN1), PINMUX_DATA(MSIOF5_SYNC_MARK, PORT269_FN4), /* Port270 - Port273 FN1 */ PINMUX_DATA(MSIOF2_SS1_MARK, PORT270_FN1), PINMUX_DATA(MSIOF2_SS2_MARK, PORT271_FN1), PINMUX_DATA(MSIOF3_SS2_MARK, PORT272_FN1), PINMUX_DATA(MSIOF3_SS1_MARK, PORT273_FN1), /* Port270 - Port273 FN3 */ PINMUX_DATA(VIO_CKO5_270_MARK, PORT270_FN3), PINMUX_DATA(VIO_CKO2_271_MARK, PORT271_FN3), PINMUX_DATA(VIO_CKO1_272_MARK, PORT272_FN3), PINMUX_DATA(VIO_CKO4_273_MARK, PORT273_FN3), /* Port274 */ PINMUX_DATA(MSIOF4_SS2_MARK, PORT274_FN1), PINMUX_DATA(TPU1TO0_MARK, PORT274_FN4), /* Port275 - Port280 */ PINMUX_DATA(IC_DP_MARK, PORT275_FN1), PINMUX_DATA(SIM0_RST_MARK, PORT276_FN1), PINMUX_DATA(IC_DM_MARK, PORT277_FN1), PINMUX_DATA(SIM0_BSICOMP_MARK, PORT278_FN1), PINMUX_DATA(SIM0_CLK_MARK, PORT279_FN1), PINMUX_DATA(SIM0_IO_MARK, PORT280_FN1), /* Port281 */ PINMUX_DATA(SIM1_IO_MARK, PORT281_FN1), PINMUX_DATA(PDM2_DATA_281_MARK, PORT281_FN2, MSEL3CR_12_1), /* Port282 */ PINMUX_DATA(SIM1_CLK_MARK, PORT282_FN1), PINMUX_DATA(PDM2_CLK_282_MARK, PORT282_FN2), /* Port283 */ PINMUX_DATA(SIM1_RST_MARK, PORT283_FN1), /* Port289 */ PINMUX_DATA(SDHID1_0_MARK, PORT289_FN1), PINMUX_DATA(STMDATA0_2_MARK, PORT289_FN3), /* Port290 */ PINMUX_DATA(SDHID1_1_MARK, PORT290_FN1), PINMUX_DATA(STMDATA1_2_MARK, PORT290_FN3), PINMUX_DATA(IRQ51_MARK, PORT290_FN0), /* Port291 - Port294 FN1 */ PINMUX_DATA(SDHID1_2_MARK, PORT291_FN1), PINMUX_DATA(SDHID1_3_MARK, PORT292_FN1), PINMUX_DATA(SDHICLK1_MARK, PORT293_FN1), PINMUX_DATA(SDHICMD1_MARK, PORT294_FN1), /* Port291 - Port294 FN3 */ PINMUX_DATA(STMDATA2_2_MARK, PORT291_FN3), PINMUX_DATA(STMDATA3_2_MARK, PORT292_FN3), PINMUX_DATA(STMCLK_2_MARK, PORT293_FN3), PINMUX_DATA(STMSIDI_2_MARK, PORT294_FN3), /* Port295 */ PINMUX_DATA(SDHID2_0_MARK, PORT295_FN1), PINMUX_DATA(MSIOF4_TXD_MARK, PORT295_FN2), PINMUX_DATA(SCIFB2_TXD_295_MARK, PORT295_FN3, MSEL3CR_10_1), PINMUX_DATA(MSIOF6_TXD_MARK, PORT295_FN4), /* Port296 */ PINMUX_DATA(SDHID2_1_MARK, PORT296_FN1), PINMUX_DATA(MSIOF6_SS2_MARK, 
PORT296_FN4), PINMUX_DATA(IRQ52_MARK, PORT296_FN0), /* Port297 - Port300 FN1 */ PINMUX_DATA(SDHID2_2_MARK, PORT297_FN1), PINMUX_DATA(SDHID2_3_MARK, PORT298_FN1), PINMUX_DATA(SDHICLK2_MARK, PORT299_FN1), PINMUX_DATA(SDHICMD2_MARK, PORT300_FN1), /* Port297 - Port300 FN2 */ PINMUX_DATA(MSIOF4_RXD_MARK, PORT297_FN2), PINMUX_DATA(MSIOF4_SYNC_MARK, PORT298_FN2), PINMUX_DATA(MSIOF4_SCK_MARK, PORT299_FN2), PINMUX_DATA(MSIOF4_SS1_MARK, PORT300_FN2), /* Port297 - Port300 FN3 */ PINMUX_DATA(SCIFB2_RXD_297_MARK, PORT297_FN3, MSEL3CR_10_1), PINMUX_DATA(SCIFB2_CTS_298_MARK, PORT298_FN3, MSEL3CR_10_1), PINMUX_DATA(SCIFB2_SCK_299_MARK, PORT299_FN3), PINMUX_DATA(SCIFB2_RTS_300_MARK, PORT300_FN3), /* Port297 - Port300 FN4 */ PINMUX_DATA(MSIOF6_RXD_MARK, PORT297_FN4), PINMUX_DATA(MSIOF6_SYNC_MARK, PORT298_FN4), PINMUX_DATA(MSIOF6_SCK_MARK, PORT299_FN4), PINMUX_DATA(MSIOF6_SS1_MARK, PORT300_FN4), /* Port301 */ PINMUX_DATA(SDHICD0_MARK, PORT301_FN1), PINMUX_DATA(IRQ50_MARK, PORT301_FN0), /* Port302 - Port306 FN1 */ PINMUX_DATA(SDHID0_0_MARK, PORT302_FN1), PINMUX_DATA(SDHID0_1_MARK, PORT303_FN1), PINMUX_DATA(SDHID0_2_MARK, PORT304_FN1), PINMUX_DATA(SDHID0_3_MARK, PORT305_FN1), PINMUX_DATA(SDHICMD0_MARK, PORT306_FN1), /* Port302 - Port306 FN3 */ PINMUX_DATA(STMDATA0_1_MARK, PORT302_FN3), PINMUX_DATA(STMDATA1_1_MARK, PORT303_FN3), PINMUX_DATA(STMDATA2_1_MARK, PORT304_FN3), PINMUX_DATA(STMDATA3_1_MARK, PORT305_FN3), PINMUX_DATA(STMSIDI_1_MARK, PORT306_FN3), /* Port307 */ PINMUX_DATA(SDHIWP0_MARK, PORT307_FN1), /* Port308 */ PINMUX_DATA(SDHICLK0_MARK, PORT308_FN1), PINMUX_DATA(STMCLK_1_MARK, PORT308_FN3), /* Port320 - Port329 */ PINMUX_DATA(IRQ16_MARK, PORT320_FN0), PINMUX_DATA(IRQ17_MARK, PORT321_FN0), PINMUX_DATA(IRQ28_MARK, PORT322_FN0), PINMUX_DATA(IRQ29_MARK, PORT323_FN0), PINMUX_DATA(IRQ30_MARK, PORT324_FN0), PINMUX_DATA(IRQ53_MARK, PORT325_FN0), PINMUX_DATA(IRQ54_MARK, PORT326_FN0), PINMUX_DATA(IRQ55_MARK, PORT327_FN0), PINMUX_DATA(IRQ56_MARK, PORT328_FN0), PINMUX_DATA(IRQ57_MARK, PORT329_FN0), }; #define __O (SH_PFC_PIN_CFG_OUTPUT) #define __IO (SH_PFC_PIN_CFG_INPUT | SH_PFC_PIN_CFG_OUTPUT) #define __PUD (SH_PFC_PIN_CFG_PULL_DOWN | SH_PFC_PIN_CFG_PULL_UP) #define R8A73A4_PIN_IO_PU_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PUD) #define R8A73A4_PIN_O(pin) SH_PFC_PIN_CFG(pin, __O) static const struct sh_pfc_pin pinmux_pins[] = { R8A73A4_PIN_IO_PU_PD(0), R8A73A4_PIN_IO_PU_PD(1), R8A73A4_PIN_IO_PU_PD(2), R8A73A4_PIN_IO_PU_PD(3), R8A73A4_PIN_IO_PU_PD(4), R8A73A4_PIN_IO_PU_PD(5), R8A73A4_PIN_IO_PU_PD(6), R8A73A4_PIN_IO_PU_PD(7), R8A73A4_PIN_IO_PU_PD(8), R8A73A4_PIN_IO_PU_PD(9), R8A73A4_PIN_IO_PU_PD(10), R8A73A4_PIN_IO_PU_PD(11), R8A73A4_PIN_IO_PU_PD(12), R8A73A4_PIN_IO_PU_PD(13), R8A73A4_PIN_IO_PU_PD(14), R8A73A4_PIN_IO_PU_PD(15), R8A73A4_PIN_IO_PU_PD(16), R8A73A4_PIN_IO_PU_PD(17), R8A73A4_PIN_IO_PU_PD(18), R8A73A4_PIN_IO_PU_PD(19), R8A73A4_PIN_IO_PU_PD(20), R8A73A4_PIN_IO_PU_PD(21), R8A73A4_PIN_IO_PU_PD(22), R8A73A4_PIN_IO_PU_PD(23), R8A73A4_PIN_IO_PU_PD(24), R8A73A4_PIN_IO_PU_PD(25), R8A73A4_PIN_IO_PU_PD(26), R8A73A4_PIN_IO_PU_PD(27), R8A73A4_PIN_IO_PU_PD(28), R8A73A4_PIN_IO_PU_PD(29), R8A73A4_PIN_IO_PU_PD(30), R8A73A4_PIN_IO_PU_PD(32), R8A73A4_PIN_IO_PU_PD(33), R8A73A4_PIN_IO_PU_PD(34), R8A73A4_PIN_IO_PU_PD(35), R8A73A4_PIN_IO_PU_PD(36), R8A73A4_PIN_IO_PU_PD(37), R8A73A4_PIN_IO_PU_PD(38), R8A73A4_PIN_IO_PU_PD(39), R8A73A4_PIN_IO_PU_PD(40), R8A73A4_PIN_IO_PU_PD(64), R8A73A4_PIN_IO_PU_PD(65), R8A73A4_PIN_IO_PU_PD(66), R8A73A4_PIN_IO_PU_PD(67), R8A73A4_PIN_IO_PU_PD(68), R8A73A4_PIN_IO_PU_PD(69), 
R8A73A4_PIN_IO_PU_PD(70), R8A73A4_PIN_IO_PU_PD(71), R8A73A4_PIN_IO_PU_PD(72), R8A73A4_PIN_IO_PU_PD(73), R8A73A4_PIN_O(74), R8A73A4_PIN_IO_PU_PD(75), R8A73A4_PIN_IO_PU_PD(76), R8A73A4_PIN_IO_PU_PD(77), R8A73A4_PIN_IO_PU_PD(78), R8A73A4_PIN_IO_PU_PD(79), R8A73A4_PIN_IO_PU_PD(80), R8A73A4_PIN_IO_PU_PD(81), R8A73A4_PIN_IO_PU_PD(82), R8A73A4_PIN_IO_PU_PD(83), R8A73A4_PIN_IO_PU_PD(84), R8A73A4_PIN_IO_PU_PD(85), R8A73A4_PIN_IO_PU_PD(96), R8A73A4_PIN_IO_PU_PD(97), R8A73A4_PIN_IO_PU_PD(98), R8A73A4_PIN_IO_PU_PD(99), R8A73A4_PIN_IO_PU_PD(100), R8A73A4_PIN_IO_PU_PD(101), R8A73A4_PIN_IO_PU_PD(102), R8A73A4_PIN_IO_PU_PD(103), R8A73A4_PIN_IO_PU_PD(104), R8A73A4_PIN_IO_PU_PD(105), R8A73A4_PIN_IO_PU_PD(106), R8A73A4_PIN_IO_PU_PD(107), R8A73A4_PIN_IO_PU_PD(108), R8A73A4_PIN_IO_PU_PD(109), R8A73A4_PIN_IO_PU_PD(110), R8A73A4_PIN_IO_PU_PD(111), R8A73A4_PIN_IO_PU_PD(112), R8A73A4_PIN_IO_PU_PD(113), R8A73A4_PIN_IO_PU_PD(114), R8A73A4_PIN_IO_PU_PD(115), R8A73A4_PIN_IO_PU_PD(116), R8A73A4_PIN_IO_PU_PD(117), R8A73A4_PIN_IO_PU_PD(118), R8A73A4_PIN_IO_PU_PD(119), R8A73A4_PIN_IO_PU_PD(120), R8A73A4_PIN_IO_PU_PD(121), R8A73A4_PIN_IO_PU_PD(122), R8A73A4_PIN_IO_PU_PD(123), R8A73A4_PIN_IO_PU_PD(124), R8A73A4_PIN_IO_PU_PD(125), R8A73A4_PIN_IO_PU_PD(126), R8A73A4_PIN_IO_PU_PD(128), R8A73A4_PIN_IO_PU_PD(129), R8A73A4_PIN_IO_PU_PD(130), R8A73A4_PIN_IO_PU_PD(131), R8A73A4_PIN_IO_PU_PD(132), R8A73A4_PIN_IO_PU_PD(133), R8A73A4_PIN_IO_PU_PD(134), R8A73A4_PIN_IO_PU_PD(160), R8A73A4_PIN_IO_PU_PD(161), R8A73A4_PIN_IO_PU_PD(162), R8A73A4_PIN_IO_PU_PD(163), R8A73A4_PIN_IO_PU_PD(164), R8A73A4_PIN_IO_PU_PD(165), R8A73A4_PIN_IO_PU_PD(166), R8A73A4_PIN_IO_PU_PD(167), R8A73A4_PIN_IO_PU_PD(168), R8A73A4_PIN_IO_PU_PD(169), R8A73A4_PIN_IO_PU_PD(170), R8A73A4_PIN_IO_PU_PD(171), R8A73A4_PIN_IO_PU_PD(172), R8A73A4_PIN_IO_PU_PD(173), R8A73A4_PIN_IO_PU_PD(174), R8A73A4_PIN_IO_PU_PD(175), R8A73A4_PIN_IO_PU_PD(176), R8A73A4_PIN_IO_PU_PD(177), R8A73A4_PIN_IO_PU_PD(178), R8A73A4_PIN_IO_PU_PD(192), R8A73A4_PIN_IO_PU_PD(193), R8A73A4_PIN_IO_PU_PD(194), R8A73A4_PIN_IO_PU_PD(195), R8A73A4_PIN_IO_PU_PD(196), R8A73A4_PIN_IO_PU_PD(197), R8A73A4_PIN_IO_PU_PD(198), R8A73A4_PIN_IO_PU_PD(199), R8A73A4_PIN_IO_PU_PD(200), R8A73A4_PIN_IO_PU_PD(201), R8A73A4_PIN_IO_PU_PD(202), R8A73A4_PIN_IO_PU_PD(203), R8A73A4_PIN_IO_PU_PD(204), R8A73A4_PIN_IO_PU_PD(205), R8A73A4_PIN_IO_PU_PD(206), R8A73A4_PIN_IO_PU_PD(207), R8A73A4_PIN_IO_PU_PD(208), R8A73A4_PIN_IO_PU_PD(209), R8A73A4_PIN_IO_PU_PD(210), R8A73A4_PIN_IO_PU_PD(211), R8A73A4_PIN_IO_PU_PD(212), R8A73A4_PIN_IO_PU_PD(213), R8A73A4_PIN_IO_PU_PD(214), R8A73A4_PIN_IO_PU_PD(215), R8A73A4_PIN_IO_PU_PD(216), R8A73A4_PIN_IO_PU_PD(217), R8A73A4_PIN_IO_PU_PD(218), R8A73A4_PIN_IO_PU_PD(219), R8A73A4_PIN_IO_PU_PD(220), R8A73A4_PIN_IO_PU_PD(221), R8A73A4_PIN_IO_PU_PD(222), R8A73A4_PIN_IO_PU_PD(224), R8A73A4_PIN_IO_PU_PD(225), R8A73A4_PIN_IO_PU_PD(226), R8A73A4_PIN_IO_PU_PD(227), R8A73A4_PIN_IO_PU_PD(228), R8A73A4_PIN_IO_PU_PD(229), R8A73A4_PIN_IO_PU_PD(230), R8A73A4_PIN_IO_PU_PD(231), R8A73A4_PIN_IO_PU_PD(232), R8A73A4_PIN_IO_PU_PD(233), R8A73A4_PIN_IO_PU_PD(234), R8A73A4_PIN_IO_PU_PD(235), R8A73A4_PIN_IO_PU_PD(236), R8A73A4_PIN_IO_PU_PD(237), R8A73A4_PIN_IO_PU_PD(238), R8A73A4_PIN_IO_PU_PD(239), R8A73A4_PIN_IO_PU_PD(240), R8A73A4_PIN_IO_PU_PD(241), R8A73A4_PIN_IO_PU_PD(242), R8A73A4_PIN_IO_PU_PD(243), R8A73A4_PIN_IO_PU_PD(244), R8A73A4_PIN_IO_PU_PD(245), R8A73A4_PIN_IO_PU_PD(246), R8A73A4_PIN_IO_PU_PD(247), R8A73A4_PIN_IO_PU_PD(248), R8A73A4_PIN_IO_PU_PD(249), R8A73A4_PIN_IO_PU_PD(250), R8A73A4_PIN_IO_PU_PD(256), 
R8A73A4_PIN_IO_PU_PD(257), R8A73A4_PIN_IO_PU_PD(258), R8A73A4_PIN_IO_PU_PD(259), R8A73A4_PIN_IO_PU_PD(260), R8A73A4_PIN_IO_PU_PD(261), R8A73A4_PIN_IO_PU_PD(262), R8A73A4_PIN_IO_PU_PD(263), R8A73A4_PIN_IO_PU_PD(264), R8A73A4_PIN_IO_PU_PD(265), R8A73A4_PIN_IO_PU_PD(266), R8A73A4_PIN_IO_PU_PD(267), R8A73A4_PIN_IO_PU_PD(268), R8A73A4_PIN_IO_PU_PD(269), R8A73A4_PIN_IO_PU_PD(270), R8A73A4_PIN_IO_PU_PD(271), R8A73A4_PIN_IO_PU_PD(272), R8A73A4_PIN_IO_PU_PD(273), R8A73A4_PIN_IO_PU_PD(274), R8A73A4_PIN_IO_PU_PD(275), R8A73A4_PIN_IO_PU_PD(276), R8A73A4_PIN_IO_PU_PD(277), R8A73A4_PIN_IO_PU_PD(278), R8A73A4_PIN_IO_PU_PD(279), R8A73A4_PIN_IO_PU_PD(280), R8A73A4_PIN_IO_PU_PD(281), R8A73A4_PIN_IO_PU_PD(282), R8A73A4_PIN_IO_PU_PD(283), R8A73A4_PIN_O(288), R8A73A4_PIN_IO_PU_PD(289), R8A73A4_PIN_IO_PU_PD(290), R8A73A4_PIN_IO_PU_PD(291), R8A73A4_PIN_IO_PU_PD(292), R8A73A4_PIN_IO_PU_PD(293), R8A73A4_PIN_IO_PU_PD(294), R8A73A4_PIN_IO_PU_PD(295), R8A73A4_PIN_IO_PU_PD(296), R8A73A4_PIN_IO_PU_PD(297), R8A73A4_PIN_IO_PU_PD(298), R8A73A4_PIN_IO_PU_PD(299), R8A73A4_PIN_IO_PU_PD(300), R8A73A4_PIN_IO_PU_PD(301), R8A73A4_PIN_IO_PU_PD(302), R8A73A4_PIN_IO_PU_PD(303), R8A73A4_PIN_IO_PU_PD(304), R8A73A4_PIN_IO_PU_PD(305), R8A73A4_PIN_IO_PU_PD(306), R8A73A4_PIN_IO_PU_PD(307), R8A73A4_PIN_IO_PU_PD(308), R8A73A4_PIN_IO_PU_PD(320), R8A73A4_PIN_IO_PU_PD(321), R8A73A4_PIN_IO_PU_PD(322), R8A73A4_PIN_IO_PU_PD(323), R8A73A4_PIN_IO_PU_PD(324), R8A73A4_PIN_IO_PU_PD(325), R8A73A4_PIN_IO_PU_PD(326), R8A73A4_PIN_IO_PU_PD(327), R8A73A4_PIN_IO_PU_PD(328), R8A73A4_PIN_IO_PU_PD(329), }; /* - IRQC ------------------------------------------------------------------- */ #define IRQC_PINS_MUX(pin, irq_mark) \ static const unsigned int irqc_irq##irq_mark##_pins[] = { \ pin, \ }; \ static const unsigned int irqc_irq##irq_mark##_mux[] = { \ IRQ##irq_mark##_MARK, \ } IRQC_PINS_MUX(0, 0); IRQC_PINS_MUX(1, 1); IRQC_PINS_MUX(2, 2); IRQC_PINS_MUX(3, 3); IRQC_PINS_MUX(4, 4); IRQC_PINS_MUX(5, 5); IRQC_PINS_MUX(6, 6); IRQC_PINS_MUX(7, 7); IRQC_PINS_MUX(8, 8); IRQC_PINS_MUX(9, 9); IRQC_PINS_MUX(10, 10); IRQC_PINS_MUX(11, 11); IRQC_PINS_MUX(12, 12); IRQC_PINS_MUX(13, 13); IRQC_PINS_MUX(14, 14); IRQC_PINS_MUX(15, 15); IRQC_PINS_MUX(66, 40); IRQC_PINS_MUX(84, 19); IRQC_PINS_MUX(85, 18); IRQC_PINS_MUX(102, 41); IRQC_PINS_MUX(103, 42); IRQC_PINS_MUX(109, 43); IRQC_PINS_MUX(110, 44); IRQC_PINS_MUX(111, 45); IRQC_PINS_MUX(112, 46); IRQC_PINS_MUX(113, 47); IRQC_PINS_MUX(114, 48); IRQC_PINS_MUX(115, 49); IRQC_PINS_MUX(160, 20); IRQC_PINS_MUX(161, 21); IRQC_PINS_MUX(162, 22); IRQC_PINS_MUX(163, 23); IRQC_PINS_MUX(175, 24); IRQC_PINS_MUX(176, 25); IRQC_PINS_MUX(177, 26); IRQC_PINS_MUX(178, 27); IRQC_PINS_MUX(192, 31); IRQC_PINS_MUX(193, 32); IRQC_PINS_MUX(194, 33); IRQC_PINS_MUX(195, 34); IRQC_PINS_MUX(196, 35); IRQC_PINS_MUX(197, 36); IRQC_PINS_MUX(198, 37); IRQC_PINS_MUX(199, 38); IRQC_PINS_MUX(200, 39); IRQC_PINS_MUX(290, 51); IRQC_PINS_MUX(296, 52); IRQC_PINS_MUX(301, 50); IRQC_PINS_MUX(320, 16); IRQC_PINS_MUX(321, 17); IRQC_PINS_MUX(322, 28); IRQC_PINS_MUX(323, 29); IRQC_PINS_MUX(324, 30); IRQC_PINS_MUX(325, 53); IRQC_PINS_MUX(326, 54); IRQC_PINS_MUX(327, 55); IRQC_PINS_MUX(328, 56); IRQC_PINS_MUX(329, 57); /* - MMCIF0 ----------------------------------------------------------------- */ static const unsigned int mmc0_data1_pins[] = { /* D[0] */ 164, }; static const unsigned int mmc0_data1_mux[] = { MMCD0_0_MARK, }; static const unsigned int mmc0_data4_pins[] = { /* D[0:3] */ 164, 165, 166, 167, }; static const unsigned int mmc0_data4_mux[] = { MMCD0_0_MARK, 
MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK, }; static const unsigned int mmc0_data8_pins[] = { /* D[0:7] */ 164, 165, 166, 167, 168, 169, 170, 171, }; static const unsigned int mmc0_data8_mux[] = { MMCD0_0_MARK, MMCD0_1_MARK, MMCD0_2_MARK, MMCD0_3_MARK, MMCD0_4_MARK, MMCD0_5_MARK, MMCD0_6_MARK, MMCD0_7_MARK, }; static const unsigned int mmc0_ctrl_pins[] = { /* CMD, CLK */ 172, 173, }; static const unsigned int mmc0_ctrl_mux[] = { MMCCMD0_MARK, MMCCLK0_MARK, }; /* - MMCIF1 ----------------------------------------------------------------- */ static const unsigned int mmc1_data1_pins[] = { /* D[0] */ 199, }; static const unsigned int mmc1_data1_mux[] = { MMCD1_0_MARK, }; static const unsigned int mmc1_data4_pins[] = { /* D[0:3] */ 199, 198, 197, 196, }; static const unsigned int mmc1_data4_mux[] = { MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK, }; static const unsigned int mmc1_data8_pins[] = { /* D[0:7] */ 199, 198, 197, 196, 195, 194, 193, 192, }; static const unsigned int mmc1_data8_mux[] = { MMCD1_0_MARK, MMCD1_1_MARK, MMCD1_2_MARK, MMCD1_3_MARK, MMCD1_4_MARK, MMCD1_5_MARK, MMCD1_6_MARK, MMCD1_7_MARK, }; static const unsigned int mmc1_ctrl_pins[] = { /* CMD, CLK */ 200, 203, }; static const unsigned int mmc1_ctrl_mux[] = { MMCCMD1_MARK, MMCCLK1_MARK, }; /* - SCIFA0 ----------------------------------------------------------------- */ static const unsigned int scifa0_data_pins[] = { /* SCIFA0_RXD, SCIFA0_TXD */ 117, 116, }; static const unsigned int scifa0_data_mux[] = { SCIFA0_RXD_MARK, SCIFA0_TXD_MARK, }; static const unsigned int scifa0_clk_pins[] = { /* SCIFA0_SCK */ 34, }; static const unsigned int scifa0_clk_mux[] = { SCIFA0_SCK_MARK, }; static const unsigned int scifa0_ctrl_pins[] = { /* SCIFA0_RTS, SCIFA0_CTS */ 32, 33, }; static const unsigned int scifa0_ctrl_mux[] = { SCIFA0_RTS_MARK, SCIFA0_CTS_MARK, }; /* - SCIFA1 ----------------------------------------------------------------- */ static const unsigned int scifa1_data_pins[] = { /* SCIFA1_RXD, SCIFA1_TXD */ 119, 118, }; static const unsigned int scifa1_data_mux[] = { SCIFA1_RXD_MARK, SCIFA1_TXD_MARK, }; static const unsigned int scifa1_clk_pins[] = { /* SCIFA1_SCK */ 37, }; static const unsigned int scifa1_clk_mux[] = { SCIFA1_SCK_MARK, }; static const unsigned int scifa1_ctrl_pins[] = { /* SCIFA1_RTS, SCIFA1_CTS */ 35, 36, }; static const unsigned int scifa1_ctrl_mux[] = { SCIFA1_RTS_MARK, SCIFA1_CTS_MARK, }; /* - SCIFB0 ----------------------------------------------------------------- */ static const unsigned int scifb0_data_pins[] = { /* SCIFB0_RXD, SCIFB0_TXD */ 123, 122, }; static const unsigned int scifb0_data_mux[] = { SCIFB0_RXD_MARK, SCIFB0_TXD_MARK, }; static const unsigned int scifb0_clk_pins[] = { /* SCIFB0_SCK */ 40, }; static const unsigned int scifb0_clk_mux[] = { SCIFB0_SCK_MARK, }; static const unsigned int scifb0_ctrl_pins[] = { /* SCIFB0_RTS, SCIFB0_CTS */ 38, 39, }; static const unsigned int scifb0_ctrl_mux[] = { SCIFB0_RTS_MARK, SCIFB0_CTS_MARK, }; /* - SCIFB1 ----------------------------------------------------------------- */ static const unsigned int scifb1_data_pins[] = { /* SCIFB1_RXD, SCIFB1_TXD */ 27, 26, }; static const unsigned int scifb1_data_mux[] = { SCIFB1_RXD_27_MARK, SCIFB1_TXD_26_MARK, }; static const unsigned int scifb1_clk_pins[] = { /* SCIFB1_SCK */ 28, }; static const unsigned int scifb1_clk_mux[] = { SCIFB1_SCK_28_MARK, }; static const unsigned int scifb1_ctrl_pins[] = { /* SCIFB1_RTS, SCIFB1_CTS */ 24, 25, }; static const unsigned int scifb1_ctrl_mux[] = { SCIFB1_RTS_24_MARK, 
SCIFB1_CTS_25_MARK, }; static const unsigned int scifb1_data_b_pins[] = { /* SCIFB1_RXD, SCIFB1_TXD */ 72, 67, }; static const unsigned int scifb1_data_b_mux[] = { SCIFB1_RXD_72_MARK, SCIFB1_TXD_67_MARK, }; static const unsigned int scifb1_clk_b_pins[] = { /* SCIFB1_SCK */ 261, }; static const unsigned int scifb1_clk_b_mux[] = { SCIFB1_SCK_261_MARK, }; static const unsigned int scifb1_ctrl_b_pins[] = { /* SCIFB1_RTS, SCIFB1_CTS */ 70, 71, }; static const unsigned int scifb1_ctrl_b_mux[] = { SCIFB1_RTS_70_MARK, SCIFB1_CTS_71_MARK, }; /* - SCIFB2 ----------------------------------------------------------------- */ static const unsigned int scifb2_data_pins[] = { /* SCIFB2_RXD, SCIFB2_TXD */ 69, 68, }; static const unsigned int scifb2_data_mux[] = { SCIFB2_RXD_69_MARK, SCIFB2_TXD_68_MARK, }; static const unsigned int scifb2_clk_pins[] = { /* SCIFB2_SCK */ 262, }; static const unsigned int scifb2_clk_mux[] = { SCIFB2_SCK_262_MARK, }; static const unsigned int scifb2_ctrl_pins[] = { /* SCIFB2_RTS, SCIFB2_CTS */ 73, 66, }; static const unsigned int scifb2_ctrl_mux[] = { SCIFB2_RTS_73_MARK, SCIFB2_CTS_66_MARK, }; static const unsigned int scifb2_data_b_pins[] = { /* SCIFB2_RXD, SCIFB2_TXD */ 297, 295, }; static const unsigned int scifb2_data_b_mux[] = { SCIFB2_RXD_297_MARK, SCIFB2_TXD_295_MARK, }; static const unsigned int scifb2_clk_b_pins[] = { /* SCIFB2_SCK */ 299, }; static const unsigned int scifb2_clk_b_mux[] = { SCIFB2_SCK_299_MARK, }; static const unsigned int scifb2_ctrl_b_pins[] = { /* SCIFB2_RTS, SCIFB2_CTS */ 300, 298, }; static const unsigned int scifb2_ctrl_b_mux[] = { SCIFB2_RTS_300_MARK, SCIFB2_CTS_298_MARK, }; /* - SCIFB3 ----------------------------------------------------------------- */ static const unsigned int scifb3_data_pins[] = { /* SCIFB3_RXD, SCIFB3_TXD */ 22, 21, }; static const unsigned int scifb3_data_mux[] = { SCIFB3_RXD_22_MARK, SCIFB3_TXD_21_MARK, }; static const unsigned int scifb3_clk_pins[] = { /* SCIFB3_SCK */ 23, }; static const unsigned int scifb3_clk_mux[] = { SCIFB3_SCK_23_MARK, }; static const unsigned int scifb3_ctrl_pins[] = { /* SCIFB3_RTS, SCIFB3_CTS */ 19, 20, }; static const unsigned int scifb3_ctrl_mux[] = { SCIFB3_RTS_19_MARK, SCIFB3_CTS_20_MARK, }; static const unsigned int scifb3_data_b_pins[] = { /* SCIFB3_RXD, SCIFB3_TXD */ 120, 121, }; static const unsigned int scifb3_data_b_mux[] = { SCIFB3_RXD_120_MARK, SCIFB3_TXD_121_MARK, }; static const unsigned int scifb3_clk_b_pins[] = { /* SCIFB3_SCK */ 40, }; static const unsigned int scifb3_clk_b_mux[] = { SCIFB3_SCK_40_MARK, }; static const unsigned int scifb3_ctrl_b_pins[] = { /* SCIFB3_RTS, SCIFB3_CTS */ 38, 39, }; static const unsigned int scifb3_ctrl_b_mux[] = { SCIFB3_RTS_38_MARK, SCIFB3_CTS_39_MARK, }; /* - SDHI0 ------------------------------------------------------------------ */ static const unsigned int sdhi0_data1_pins[] = { /* D0 */ 302, }; static const unsigned int sdhi0_data1_mux[] = { SDHID0_0_MARK, }; static const unsigned int sdhi0_data4_pins[] = { /* D[0:3] */ 302, 303, 304, 305, }; static const unsigned int sdhi0_data4_mux[] = { SDHID0_0_MARK, SDHID0_1_MARK, SDHID0_2_MARK, SDHID0_3_MARK, }; static const unsigned int sdhi0_ctrl_pins[] = { /* CLK, CMD */ 308, 306, }; static const unsigned int sdhi0_ctrl_mux[] = { SDHICLK0_MARK, SDHICMD0_MARK, }; static const unsigned int sdhi0_cd_pins[] = { /* CD */ 301, }; static const unsigned int sdhi0_cd_mux[] = { SDHICD0_MARK, }; static const unsigned int sdhi0_wp_pins[] = { /* WP */ 307, }; static const unsigned int sdhi0_wp_mux[] = { 
SDHIWP0_MARK, }; /* - SDHI1 ------------------------------------------------------------------ */ static const unsigned int sdhi1_data1_pins[] = { /* D0 */ 289, }; static const unsigned int sdhi1_data1_mux[] = { SDHID1_0_MARK, }; static const unsigned int sdhi1_data4_pins[] = { /* D[0:3] */ 289, 290, 291, 292, }; static const unsigned int sdhi1_data4_mux[] = { SDHID1_0_MARK, SDHID1_1_MARK, SDHID1_2_MARK, SDHID1_3_MARK, }; static const unsigned int sdhi1_ctrl_pins[] = { /* CLK, CMD */ 293, 294, }; static const unsigned int sdhi1_ctrl_mux[] = { SDHICLK1_MARK, SDHICMD1_MARK, }; /* - SDHI2 ------------------------------------------------------------------ */ static const unsigned int sdhi2_data1_pins[] = { /* D0 */ 295, }; static const unsigned int sdhi2_data1_mux[] = { SDHID2_0_MARK, }; static const unsigned int sdhi2_data4_pins[] = { /* D[0:3] */ 295, 296, 297, 298, }; static const unsigned int sdhi2_data4_mux[] = { SDHID2_0_MARK, SDHID2_1_MARK, SDHID2_2_MARK, SDHID2_3_MARK, }; static const unsigned int sdhi2_ctrl_pins[] = { /* CLK, CMD */ 299, 300, }; static const unsigned int sdhi2_ctrl_mux[] = { SDHICLK2_MARK, SDHICMD2_MARK, }; static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(irqc_irq0), SH_PFC_PIN_GROUP(irqc_irq1), SH_PFC_PIN_GROUP(irqc_irq2), SH_PFC_PIN_GROUP(irqc_irq3), SH_PFC_PIN_GROUP(irqc_irq4), SH_PFC_PIN_GROUP(irqc_irq5), SH_PFC_PIN_GROUP(irqc_irq6), SH_PFC_PIN_GROUP(irqc_irq7), SH_PFC_PIN_GROUP(irqc_irq8), SH_PFC_PIN_GROUP(irqc_irq9), SH_PFC_PIN_GROUP(irqc_irq10), SH_PFC_PIN_GROUP(irqc_irq11), SH_PFC_PIN_GROUP(irqc_irq12), SH_PFC_PIN_GROUP(irqc_irq13), SH_PFC_PIN_GROUP(irqc_irq14), SH_PFC_PIN_GROUP(irqc_irq15), SH_PFC_PIN_GROUP(irqc_irq16), SH_PFC_PIN_GROUP(irqc_irq17), SH_PFC_PIN_GROUP(irqc_irq18), SH_PFC_PIN_GROUP(irqc_irq19), SH_PFC_PIN_GROUP(irqc_irq20), SH_PFC_PIN_GROUP(irqc_irq21), SH_PFC_PIN_GROUP(irqc_irq22), SH_PFC_PIN_GROUP(irqc_irq23), SH_PFC_PIN_GROUP(irqc_irq24), SH_PFC_PIN_GROUP(irqc_irq25), SH_PFC_PIN_GROUP(irqc_irq26), SH_PFC_PIN_GROUP(irqc_irq27), SH_PFC_PIN_GROUP(irqc_irq28), SH_PFC_PIN_GROUP(irqc_irq29), SH_PFC_PIN_GROUP(irqc_irq30), SH_PFC_PIN_GROUP(irqc_irq31), SH_PFC_PIN_GROUP(irqc_irq32), SH_PFC_PIN_GROUP(irqc_irq33), SH_PFC_PIN_GROUP(irqc_irq34), SH_PFC_PIN_GROUP(irqc_irq35), SH_PFC_PIN_GROUP(irqc_irq36), SH_PFC_PIN_GROUP(irqc_irq37), SH_PFC_PIN_GROUP(irqc_irq38), SH_PFC_PIN_GROUP(irqc_irq39), SH_PFC_PIN_GROUP(irqc_irq40), SH_PFC_PIN_GROUP(irqc_irq41), SH_PFC_PIN_GROUP(irqc_irq42), SH_PFC_PIN_GROUP(irqc_irq43), SH_PFC_PIN_GROUP(irqc_irq44), SH_PFC_PIN_GROUP(irqc_irq45), SH_PFC_PIN_GROUP(irqc_irq46), SH_PFC_PIN_GROUP(irqc_irq47), SH_PFC_PIN_GROUP(irqc_irq48), SH_PFC_PIN_GROUP(irqc_irq49), SH_PFC_PIN_GROUP(irqc_irq50), SH_PFC_PIN_GROUP(irqc_irq51), SH_PFC_PIN_GROUP(irqc_irq52), SH_PFC_PIN_GROUP(irqc_irq53), SH_PFC_PIN_GROUP(irqc_irq54), SH_PFC_PIN_GROUP(irqc_irq55), SH_PFC_PIN_GROUP(irqc_irq56), SH_PFC_PIN_GROUP(irqc_irq57), SH_PFC_PIN_GROUP(mmc0_data1), SH_PFC_PIN_GROUP(mmc0_data4), SH_PFC_PIN_GROUP(mmc0_data8), SH_PFC_PIN_GROUP(mmc0_ctrl), SH_PFC_PIN_GROUP(mmc1_data1), SH_PFC_PIN_GROUP(mmc1_data4), SH_PFC_PIN_GROUP(mmc1_data8), SH_PFC_PIN_GROUP(mmc1_ctrl), SH_PFC_PIN_GROUP(scifa0_data), SH_PFC_PIN_GROUP(scifa0_clk), SH_PFC_PIN_GROUP(scifa0_ctrl), SH_PFC_PIN_GROUP(scifa1_data), SH_PFC_PIN_GROUP(scifa1_clk), SH_PFC_PIN_GROUP(scifa1_ctrl), SH_PFC_PIN_GROUP(scifb0_data), SH_PFC_PIN_GROUP(scifb0_clk), SH_PFC_PIN_GROUP(scifb0_ctrl), SH_PFC_PIN_GROUP(scifb1_data), SH_PFC_PIN_GROUP(scifb1_clk), SH_PFC_PIN_GROUP(scifb1_ctrl), 
SH_PFC_PIN_GROUP(scifb1_data_b), SH_PFC_PIN_GROUP(scifb1_clk_b), SH_PFC_PIN_GROUP(scifb1_ctrl_b), SH_PFC_PIN_GROUP(scifb2_data), SH_PFC_PIN_GROUP(scifb2_clk), SH_PFC_PIN_GROUP(scifb2_ctrl), SH_PFC_PIN_GROUP(scifb2_data_b), SH_PFC_PIN_GROUP(scifb2_clk_b), SH_PFC_PIN_GROUP(scifb2_ctrl_b), SH_PFC_PIN_GROUP(scifb3_data), SH_PFC_PIN_GROUP(scifb3_clk), SH_PFC_PIN_GROUP(scifb3_ctrl), SH_PFC_PIN_GROUP(scifb3_data_b), SH_PFC_PIN_GROUP(scifb3_clk_b), SH_PFC_PIN_GROUP(scifb3_ctrl_b), SH_PFC_PIN_GROUP(sdhi0_data1), SH_PFC_PIN_GROUP(sdhi0_data4), SH_PFC_PIN_GROUP(sdhi0_ctrl), SH_PFC_PIN_GROUP(sdhi0_cd), SH_PFC_PIN_GROUP(sdhi0_wp), SH_PFC_PIN_GROUP(sdhi1_data1), SH_PFC_PIN_GROUP(sdhi1_data4), SH_PFC_PIN_GROUP(sdhi1_ctrl), SH_PFC_PIN_GROUP(sdhi2_data1), SH_PFC_PIN_GROUP(sdhi2_data4), SH_PFC_PIN_GROUP(sdhi2_ctrl), }; static const char * const irqc_groups[] = { "irqc_irq0", "irqc_irq1", "irqc_irq2", "irqc_irq3", "irqc_irq4", "irqc_irq5", "irqc_irq6", "irqc_irq7", "irqc_irq8", "irqc_irq9", "irqc_irq10", "irqc_irq11", "irqc_irq12", "irqc_irq13", "irqc_irq14", "irqc_irq15", "irqc_irq16", "irqc_irq17", "irqc_irq18", "irqc_irq19", "irqc_irq20", "irqc_irq21", "irqc_irq22", "irqc_irq23", "irqc_irq24", "irqc_irq25", "irqc_irq26", "irqc_irq27", "irqc_irq28", "irqc_irq29", "irqc_irq30", "irqc_irq31", "irqc_irq32", "irqc_irq33", "irqc_irq34", "irqc_irq35", "irqc_irq36", "irqc_irq37", "irqc_irq38", "irqc_irq39", "irqc_irq40", "irqc_irq41", "irqc_irq42", "irqc_irq43", "irqc_irq44", "irqc_irq45", "irqc_irq46", "irqc_irq47", "irqc_irq48", "irqc_irq49", "irqc_irq50", "irqc_irq51", "irqc_irq52", "irqc_irq53", "irqc_irq54", "irqc_irq55", "irqc_irq56", "irqc_irq57", }; static const char * const mmc0_groups[] = { "mmc0_data1", "mmc0_data4", "mmc0_data8", "mmc0_ctrl", }; static const char * const mmc1_groups[] = { "mmc1_data1", "mmc1_data4", "mmc1_data8", "mmc1_ctrl", }; static const char * const scifa0_groups[] = { "scifa0_data", "scifa0_clk", "scifa0_ctrl", }; static const char * const scifa1_groups[] = { "scifa1_data", "scifa1_clk", "scifa1_ctrl", }; static const char * const scifb0_groups[] = { "scifb0_data", "scifb0_clk", "scifb0_ctrl", }; static const char * const scifb1_groups[] = { "scifb1_data", "scifb1_clk", "scifb1_ctrl", "scifb1_data_b", "scifb1_clk_b", "scifb1_ctrl_b", }; static const char * const scifb2_groups[] = { "scifb2_data", "scifb2_clk", "scifb2_ctrl", "scifb2_data_b", "scifb2_clk_b", "scifb2_ctrl_b", }; static const char * const scifb3_groups[] = { "scifb3_data", "scifb3_clk", "scifb3_ctrl", "scifb3_data_b", "scifb3_clk_b", "scifb3_ctrl_b", }; static const char * const sdhi0_groups[] = { "sdhi0_data1", "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd", "sdhi0_wp", }; static const char * const sdhi1_groups[] = { "sdhi1_data1", "sdhi1_data4", "sdhi1_ctrl", }; static const char * const sdhi2_groups[] = { "sdhi2_data1", "sdhi2_data4", "sdhi2_ctrl", }; static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(irqc), SH_PFC_FUNCTION(mmc0), SH_PFC_FUNCTION(mmc1), SH_PFC_FUNCTION(scifa0), SH_PFC_FUNCTION(scifa1), SH_PFC_FUNCTION(scifb0), SH_PFC_FUNCTION(scifb1), SH_PFC_FUNCTION(scifb2), SH_PFC_FUNCTION(scifb3), SH_PFC_FUNCTION(sdhi0), SH_PFC_FUNCTION(sdhi1), SH_PFC_FUNCTION(sdhi2), }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { PORTCR(0, 0xe6050000), PORTCR(1, 0xe6050001), PORTCR(2, 0xe6050002), PORTCR(3, 0xe6050003), PORTCR(4, 0xe6050004), PORTCR(5, 0xe6050005), PORTCR(6, 0xe6050006), PORTCR(7, 0xe6050007), PORTCR(8, 0xe6050008), PORTCR(9, 0xe6050009), PORTCR(10, 0xe605000A), PORTCR(11, 
0xe605000B), PORTCR(12, 0xe605000C), PORTCR(13, 0xe605000D), PORTCR(14, 0xe605000E), PORTCR(15, 0xe605000F), PORTCR(16, 0xe6050010), PORTCR(17, 0xe6050011), PORTCR(18, 0xe6050012), PORTCR(19, 0xe6050013), PORTCR(20, 0xe6050014), PORTCR(21, 0xe6050015), PORTCR(22, 0xe6050016), PORTCR(23, 0xe6050017), PORTCR(24, 0xe6050018), PORTCR(25, 0xe6050019), PORTCR(26, 0xe605001A), PORTCR(27, 0xe605001B), PORTCR(28, 0xe605001C), PORTCR(29, 0xe605001D), PORTCR(30, 0xe605001E), PORTCR(32, 0xe6051020), PORTCR(33, 0xe6051021), PORTCR(34, 0xe6051022), PORTCR(35, 0xe6051023), PORTCR(36, 0xe6051024), PORTCR(37, 0xe6051025), PORTCR(38, 0xe6051026), PORTCR(39, 0xe6051027), PORTCR(40, 0xe6051028), PORTCR(64, 0xe6050040), PORTCR(65, 0xe6050041), PORTCR(66, 0xe6050042), PORTCR(67, 0xe6050043), PORTCR(68, 0xe6050044), PORTCR(69, 0xe6050045), PORTCR(70, 0xe6050046), PORTCR(71, 0xe6050047), PORTCR(72, 0xe6050048), PORTCR(73, 0xe6050049), PORTCR(74, 0xe605004A), PORTCR(75, 0xe605004B), PORTCR(76, 0xe605004C), PORTCR(77, 0xe605004D), PORTCR(78, 0xe605004E), PORTCR(79, 0xe605004F), PORTCR(80, 0xe6050050), PORTCR(81, 0xe6050051), PORTCR(82, 0xe6050052), PORTCR(83, 0xe6050053), PORTCR(84, 0xe6050054), PORTCR(85, 0xe6050055), PORTCR(96, 0xe6051060), PORTCR(97, 0xe6051061), PORTCR(98, 0xe6051062), PORTCR(99, 0xe6051063), PORTCR(100, 0xe6051064), PORTCR(101, 0xe6051065), PORTCR(102, 0xe6051066), PORTCR(103, 0xe6051067), PORTCR(104, 0xe6051068), PORTCR(105, 0xe6051069), PORTCR(106, 0xe605106A), PORTCR(107, 0xe605106B), PORTCR(108, 0xe605106C), PORTCR(109, 0xe605106D), PORTCR(110, 0xe605106E), PORTCR(111, 0xe605106F), PORTCR(112, 0xe6051070), PORTCR(113, 0xe6051071), PORTCR(114, 0xe6051072), PORTCR(115, 0xe6051073), PORTCR(116, 0xe6051074), PORTCR(117, 0xe6051075), PORTCR(118, 0xe6051076), PORTCR(119, 0xe6051077), PORTCR(120, 0xe6051078), PORTCR(121, 0xe6051079), PORTCR(122, 0xe605107A), PORTCR(123, 0xe605107B), PORTCR(124, 0xe605107C), PORTCR(125, 0xe605107D), PORTCR(126, 0xe605107E), PORTCR(128, 0xe6051080), PORTCR(129, 0xe6051081), PORTCR(130, 0xe6051082), PORTCR(131, 0xe6051083), PORTCR(132, 0xe6051084), PORTCR(133, 0xe6051085), PORTCR(134, 0xe6051086), PORTCR(160, 0xe60520A0), PORTCR(161, 0xe60520A1), PORTCR(162, 0xe60520A2), PORTCR(163, 0xe60520A3), PORTCR(164, 0xe60520A4), PORTCR(165, 0xe60520A5), PORTCR(166, 0xe60520A6), PORTCR(167, 0xe60520A7), PORTCR(168, 0xe60520A8), PORTCR(169, 0xe60520A9), PORTCR(170, 0xe60520AA), PORTCR(171, 0xe60520AB), PORTCR(172, 0xe60520AC), PORTCR(173, 0xe60520AD), PORTCR(174, 0xe60520AE), PORTCR(175, 0xe60520AF), PORTCR(176, 0xe60520B0), PORTCR(177, 0xe60520B1), PORTCR(178, 0xe60520B2), PORTCR(192, 0xe60520C0), PORTCR(193, 0xe60520C1), PORTCR(194, 0xe60520C2), PORTCR(195, 0xe60520C3), PORTCR(196, 0xe60520C4), PORTCR(197, 0xe60520C5), PORTCR(198, 0xe60520C6), PORTCR(199, 0xe60520C7), PORTCR(200, 0xe60520C8), PORTCR(201, 0xe60520C9), PORTCR(202, 0xe60520CA), PORTCR(203, 0xe60520CB), PORTCR(204, 0xe60520CC), PORTCR(205, 0xe60520CD), PORTCR(206, 0xe60520CE), PORTCR(207, 0xe60520CF), PORTCR(208, 0xe60520D0), PORTCR(209, 0xe60520D1), PORTCR(210, 0xe60520D2), PORTCR(211, 0xe60520D3), PORTCR(212, 0xe60520D4), PORTCR(213, 0xe60520D5), PORTCR(214, 0xe60520D6), PORTCR(215, 0xe60520D7), PORTCR(216, 0xe60520D8), PORTCR(217, 0xe60520D9), PORTCR(218, 0xe60520DA), PORTCR(219, 0xe60520DB), PORTCR(220, 0xe60520DC), PORTCR(221, 0xe60520DD), PORTCR(222, 0xe60520DE), PORTCR(224, 0xe60520E0), PORTCR(225, 0xe60520E1), PORTCR(226, 0xe60520E2), PORTCR(227, 0xe60520E3), PORTCR(228, 0xe60520E4), PORTCR(229, 
0xe60520E5), PORTCR(230, 0xe60520e6), PORTCR(231, 0xe60520E7), PORTCR(232, 0xe60520E8), PORTCR(233, 0xe60520E9), PORTCR(234, 0xe60520EA), PORTCR(235, 0xe60520EB), PORTCR(236, 0xe60520EC), PORTCR(237, 0xe60520ED), PORTCR(238, 0xe60520EE), PORTCR(239, 0xe60520EF), PORTCR(240, 0xe60520F0), PORTCR(241, 0xe60520F1), PORTCR(242, 0xe60520F2), PORTCR(243, 0xe60520F3), PORTCR(244, 0xe60520F4), PORTCR(245, 0xe60520F5), PORTCR(246, 0xe60520F6), PORTCR(247, 0xe60520F7), PORTCR(248, 0xe60520F8), PORTCR(249, 0xe60520F9), PORTCR(250, 0xe60520FA), PORTCR(256, 0xe6052100), PORTCR(257, 0xe6052101), PORTCR(258, 0xe6052102), PORTCR(259, 0xe6052103), PORTCR(260, 0xe6052104), PORTCR(261, 0xe6052105), PORTCR(262, 0xe6052106), PORTCR(263, 0xe6052107), PORTCR(264, 0xe6052108), PORTCR(265, 0xe6052109), PORTCR(266, 0xe605210A), PORTCR(267, 0xe605210B), PORTCR(268, 0xe605210C), PORTCR(269, 0xe605210D), PORTCR(270, 0xe605210E), PORTCR(271, 0xe605210F), PORTCR(272, 0xe6052110), PORTCR(273, 0xe6052111), PORTCR(274, 0xe6052112), PORTCR(275, 0xe6052113), PORTCR(276, 0xe6052114), PORTCR(277, 0xe6052115), PORTCR(278, 0xe6052116), PORTCR(279, 0xe6052117), PORTCR(280, 0xe6052118), PORTCR(281, 0xe6052119), PORTCR(282, 0xe605211A), PORTCR(283, 0xe605211B), PORTCR(288, 0xe6053120), PORTCR(289, 0xe6053121), PORTCR(290, 0xe6053122), PORTCR(291, 0xe6053123), PORTCR(292, 0xe6053124), PORTCR(293, 0xe6053125), PORTCR(294, 0xe6053126), PORTCR(295, 0xe6053127), PORTCR(296, 0xe6053128), PORTCR(297, 0xe6053129), PORTCR(298, 0xe605312A), PORTCR(299, 0xe605312B), PORTCR(300, 0xe605312C), PORTCR(301, 0xe605312D), PORTCR(302, 0xe605312E), PORTCR(303, 0xe605312F), PORTCR(304, 0xe6053130), PORTCR(305, 0xe6053131), PORTCR(306, 0xe6053132), PORTCR(307, 0xe6053133), PORTCR(308, 0xe6053134), PORTCR(320, 0xe6053140), PORTCR(321, 0xe6053141), PORTCR(322, 0xe6053142), PORTCR(323, 0xe6053143), PORTCR(324, 0xe6053144), PORTCR(325, 0xe6053145), PORTCR(326, 0xe6053146), PORTCR(327, 0xe6053147), PORTCR(328, 0xe6053148), PORTCR(329, 0xe6053149), { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1) { MSEL1CR_31_0, MSEL1CR_31_1, 0, 0, 0, 0, 0, 0, MSEL1CR_27_0, MSEL1CR_27_1, 0, 0, MSEL1CR_25_0, MSEL1CR_25_1, MSEL1CR_24_0, MSEL1CR_24_1, 0, 0, MSEL1CR_22_0, MSEL1CR_22_1, MSEL1CR_21_0, MSEL1CR_21_1, MSEL1CR_20_0, MSEL1CR_20_1, MSEL1CR_19_0, MSEL1CR_19_1, MSEL1CR_18_0, MSEL1CR_18_1, MSEL1CR_17_0, MSEL1CR_17_1, MSEL1CR_16_0, MSEL1CR_16_1, MSEL1CR_15_0, MSEL1CR_15_1, MSEL1CR_14_0, MSEL1CR_14_1, MSEL1CR_13_0, MSEL1CR_13_1, MSEL1CR_12_0, MSEL1CR_12_1, MSEL1CR_11_0, MSEL1CR_11_1, MSEL1CR_10_0, MSEL1CR_10_1, MSEL1CR_09_0, MSEL1CR_09_1, MSEL1CR_08_0, MSEL1CR_08_1, MSEL1CR_07_0, MSEL1CR_07_1, MSEL1CR_06_0, MSEL1CR_06_1, MSEL1CR_05_0, MSEL1CR_05_1, MSEL1CR_04_0, MSEL1CR_04_1, MSEL1CR_03_0, MSEL1CR_03_1, MSEL1CR_02_0, MSEL1CR_02_1, MSEL1CR_01_0, MSEL1CR_01_1, MSEL1CR_00_0, MSEL1CR_00_1, } }, { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1) { MSEL3CR_31_0, MSEL3CR_31_1, 0, 0, 0, 0, MSEL3CR_28_0, MSEL3CR_28_1, MSEL3CR_27_0, MSEL3CR_27_1, MSEL3CR_26_0, MSEL3CR_26_1, 0, 0, 0, 0, MSEL3CR_23_0, MSEL3CR_23_1, MSEL3CR_22_0, MSEL3CR_22_1, MSEL3CR_21_0, MSEL3CR_21_1, MSEL3CR_20_0, MSEL3CR_20_1, MSEL3CR_19_0, MSEL3CR_19_1, MSEL3CR_18_0, MSEL3CR_18_1, MSEL3CR_17_0, MSEL3CR_17_1, MSEL3CR_16_0, MSEL3CR_16_1, MSEL3CR_15_0, MSEL3CR_15_1, 0, 0, 0, 0, MSEL3CR_12_0, MSEL3CR_12_1, MSEL3CR_11_0, MSEL3CR_11_1, MSEL3CR_10_0, MSEL3CR_10_1, MSEL3CR_09_0, MSEL3CR_09_1, 0, 0, 0, 0, MSEL3CR_06_0, MSEL3CR_06_1, 0, 0, 0, 0, MSEL3CR_03_0, MSEL3CR_03_1, 0, 0, MSEL3CR_01_0, MSEL3CR_01_1, MSEL3CR_00_0, 
MSEL3CR_00_1, } }, { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1) { 0, 0, MSEL4CR_30_0, MSEL4CR_30_1, MSEL4CR_29_0, MSEL4CR_29_1, MSEL4CR_28_0, MSEL4CR_28_1, MSEL4CR_27_0, MSEL4CR_27_1, MSEL4CR_26_0, MSEL4CR_26_1, MSEL4CR_25_0, MSEL4CR_25_1, MSEL4CR_24_0, MSEL4CR_24_1, MSEL4CR_23_0, MSEL4CR_23_1, MSEL4CR_22_0, MSEL4CR_22_1, MSEL4CR_21_0, MSEL4CR_21_1, MSEL4CR_20_0, MSEL4CR_20_1, MSEL4CR_19_0, MSEL4CR_19_1, MSEL4CR_18_0, MSEL4CR_18_1, MSEL4CR_17_0, MSEL4CR_17_1, MSEL4CR_16_0, MSEL4CR_16_1, MSEL4CR_15_0, MSEL4CR_15_1, MSEL4CR_14_0, MSEL4CR_14_1, MSEL4CR_13_0, MSEL4CR_13_1, MSEL4CR_12_0, MSEL4CR_12_1, MSEL4CR_11_0, MSEL4CR_11_1, MSEL4CR_10_0, MSEL4CR_10_1, MSEL4CR_09_0, MSEL4CR_09_1, 0, 0, MSEL4CR_07_0, MSEL4CR_07_1, 0, 0, 0, 0, MSEL4CR_04_0, MSEL4CR_04_1, 0, 0, 0, 0, MSEL4CR_01_0, MSEL4CR_01_1, 0, 0, } }, { PINMUX_CFG_REG("MSEL5CR", 0xe6058028, 32, 1) { MSEL5CR_31_0, MSEL5CR_31_1, MSEL5CR_30_0, MSEL5CR_30_1, MSEL5CR_29_0, MSEL5CR_29_1, MSEL5CR_28_0, MSEL5CR_28_1, MSEL5CR_27_0, MSEL5CR_27_1, MSEL5CR_26_0, MSEL5CR_26_1, MSEL5CR_25_0, MSEL5CR_25_1, MSEL5CR_24_0, MSEL5CR_24_1, MSEL5CR_23_0, MSEL5CR_23_1, MSEL5CR_22_0, MSEL5CR_22_1, MSEL5CR_21_0, MSEL5CR_21_1, MSEL5CR_20_0, MSEL5CR_20_1, MSEL5CR_19_0, MSEL5CR_19_1, MSEL5CR_18_0, MSEL5CR_18_1, MSEL5CR_17_0, MSEL5CR_17_1, MSEL5CR_16_0, MSEL5CR_16_1, MSEL5CR_15_0, MSEL5CR_15_1, MSEL5CR_14_0, MSEL5CR_14_1, MSEL5CR_13_0, MSEL5CR_13_1, MSEL5CR_12_0, MSEL5CR_12_1, MSEL5CR_11_0, MSEL5CR_11_1, MSEL5CR_10_0, MSEL5CR_10_1, MSEL5CR_09_0, MSEL5CR_09_1, MSEL5CR_08_0, MSEL5CR_08_1, MSEL5CR_07_0, MSEL5CR_07_1, MSEL5CR_06_0, MSEL5CR_06_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("MSEL8CR", 0xe6058034, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSEL8CR_16_0, MSEL8CR_16_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSEL8CR_01_0, MSEL8CR_01_1, MSEL8CR_00_0, MSEL8CR_00_1, } }, { }, }; static const struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PORTL031_000DR", 0xe6054000, 32) { 0, PORT30_DATA, PORT29_DATA, PORT28_DATA, PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA, PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA, PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA, PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA, PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA, PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA, PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA, } }, { PINMUX_DATA_REG("PORTD063_032DR", 0xe6055000, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT40_DATA, PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA, PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA, } }, { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054004, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT85_DATA, PORT84_DATA, PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA, PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA, PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA, PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA, PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA, } }, { PINMUX_DATA_REG("PORTD127_096DR", 0xe6055004, 32) { 0, PORT126_DATA, PORT125_DATA, PORT124_DATA, PORT123_DATA, PORT122_DATA, PORT121_DATA, PORT120_DATA, PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA, PORT115_DATA, PORT114_DATA, PORT113_DATA, PORT112_DATA, PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA, PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA, PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA, PORT99_DATA, PORT98_DATA, 
PORT97_DATA, PORT96_DATA, } }, { PINMUX_DATA_REG("PORTD159_128DR", 0xe6055008, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT134_DATA, PORT133_DATA, PORT132_DATA, PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA, } }, { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056000, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT178_DATA, PORT177_DATA, PORT176_DATA, PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA, PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA, PORT167_DATA, PORT166_DATA, PORT165_DATA, PORT164_DATA, PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA, } }, { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056004, 32) { 0, PORT222_DATA, PORT221_DATA, PORT220_DATA, PORT219_DATA, PORT218_DATA, PORT217_DATA, PORT216_DATA, PORT215_DATA, PORT214_DATA, PORT213_DATA, PORT212_DATA, PORT211_DATA, PORT210_DATA, PORT209_DATA, PORT208_DATA, PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA, PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA, PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA, PORT195_DATA, PORT194_DATA, PORT193_DATA, PORT192_DATA, } }, { PINMUX_DATA_REG("PORTR255_224DR", 0xe6056008, 32) { 0, 0, 0, 0, 0, PORT250_DATA, PORT249_DATA, PORT248_DATA, PORT247_DATA, PORT246_DATA, PORT245_DATA, PORT244_DATA, PORT243_DATA, PORT242_DATA, PORT241_DATA, PORT240_DATA, PORT239_DATA, PORT238_DATA, PORT237_DATA, PORT236_DATA, PORT235_DATA, PORT234_DATA, PORT233_DATA, PORT232_DATA, PORT231_DATA, PORT230_DATA, PORT229_DATA, PORT228_DATA, PORT227_DATA, PORT226_DATA, PORT225_DATA, PORT224_DATA, } }, { PINMUX_DATA_REG("PORTR287_256DR", 0xe605600C, 32) { 0, 0, 0, 0, PORT283_DATA, PORT282_DATA, PORT281_DATA, PORT280_DATA, PORT279_DATA, PORT278_DATA, PORT277_DATA, PORT276_DATA, PORT275_DATA, PORT274_DATA, PORT273_DATA, PORT272_DATA, PORT271_DATA, PORT270_DATA, PORT269_DATA, PORT268_DATA, PORT267_DATA, PORT266_DATA, PORT265_DATA, PORT264_DATA, PORT263_DATA, PORT262_DATA, PORT261_DATA, PORT260_DATA, PORT259_DATA, PORT258_DATA, PORT257_DATA, PORT256_DATA, } }, { PINMUX_DATA_REG("PORTU319_288DR", 0xe6057000, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT308_DATA, PORT307_DATA, PORT306_DATA, PORT305_DATA, PORT304_DATA, PORT303_DATA, PORT302_DATA, PORT301_DATA, PORT300_DATA, PORT299_DATA, PORT298_DATA, PORT297_DATA, PORT296_DATA, PORT295_DATA, PORT294_DATA, PORT293_DATA, PORT292_DATA, PORT291_DATA, PORT290_DATA, PORT289_DATA, PORT288_DATA, } }, { PINMUX_DATA_REG("PORTU351_320DR", 0xe6057004, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT329_DATA, PORT328_DATA, PORT327_DATA, PORT326_DATA, PORT325_DATA, PORT324_DATA, PORT323_DATA, PORT322_DATA, PORT321_DATA, PORT320_DATA, } }, { }, }; static const struct pinmux_irq pinmux_irqs[] = { PINMUX_IRQ(irq_pin(0), 0), PINMUX_IRQ(irq_pin(1), 1), PINMUX_IRQ(irq_pin(2), 2), PINMUX_IRQ(irq_pin(3), 3), PINMUX_IRQ(irq_pin(4), 4), PINMUX_IRQ(irq_pin(5), 5), PINMUX_IRQ(irq_pin(6), 6), PINMUX_IRQ(irq_pin(7), 7), PINMUX_IRQ(irq_pin(8), 8), PINMUX_IRQ(irq_pin(9), 9), PINMUX_IRQ(irq_pin(10), 10), PINMUX_IRQ(irq_pin(11), 11), PINMUX_IRQ(irq_pin(12), 12), PINMUX_IRQ(irq_pin(13), 13), PINMUX_IRQ(irq_pin(14), 14), PINMUX_IRQ(irq_pin(15), 15), PINMUX_IRQ(irq_pin(16), 320), PINMUX_IRQ(irq_pin(17), 321), PINMUX_IRQ(irq_pin(18), 85), PINMUX_IRQ(irq_pin(19), 84), PINMUX_IRQ(irq_pin(20), 160), PINMUX_IRQ(irq_pin(21), 161), PINMUX_IRQ(irq_pin(22), 162), PINMUX_IRQ(irq_pin(23), 163), PINMUX_IRQ(irq_pin(24), 175), PINMUX_IRQ(irq_pin(25), 176), PINMUX_IRQ(irq_pin(26), 177), PINMUX_IRQ(irq_pin(27), 178), 
PINMUX_IRQ(irq_pin(28), 322), PINMUX_IRQ(irq_pin(29), 323), PINMUX_IRQ(irq_pin(30), 324), PINMUX_IRQ(irq_pin(31), 192), PINMUX_IRQ(irq_pin(32), 193), PINMUX_IRQ(irq_pin(33), 194), PINMUX_IRQ(irq_pin(34), 195), PINMUX_IRQ(irq_pin(35), 196), PINMUX_IRQ(irq_pin(36), 197), PINMUX_IRQ(irq_pin(37), 198), PINMUX_IRQ(irq_pin(38), 199), PINMUX_IRQ(irq_pin(39), 200), PINMUX_IRQ(irq_pin(40), 66), PINMUX_IRQ(irq_pin(41), 102), PINMUX_IRQ(irq_pin(42), 103), PINMUX_IRQ(irq_pin(43), 109), PINMUX_IRQ(irq_pin(44), 110), PINMUX_IRQ(irq_pin(45), 111), PINMUX_IRQ(irq_pin(46), 112), PINMUX_IRQ(irq_pin(47), 113), PINMUX_IRQ(irq_pin(48), 114), PINMUX_IRQ(irq_pin(49), 115), PINMUX_IRQ(irq_pin(50), 301), PINMUX_IRQ(irq_pin(51), 290), PINMUX_IRQ(irq_pin(52), 296), PINMUX_IRQ(irq_pin(53), 325), PINMUX_IRQ(irq_pin(54), 326), PINMUX_IRQ(irq_pin(55), 327), PINMUX_IRQ(irq_pin(56), 328), PINMUX_IRQ(irq_pin(57), 329), }; #define PORTCR_PULMD_OFF (0 << 6) #define PORTCR_PULMD_DOWN (2 << 6) #define PORTCR_PULMD_UP (3 << 6) #define PORTCR_PULMD_MASK (3 << 6) static const unsigned int r8a73a4_portcr_offsets[] = { 0x00000000, 0x00001000, 0x00000000, 0x00001000, 0x00001000, 0x00002000, 0x00002000, 0x00002000, 0x00002000, 0x00003000, 0x00003000, }; static unsigned int r8a73a4_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin) { void __iomem *addr; addr = pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin; switch (ioread8(addr) & PORTCR_PULMD_MASK) { case PORTCR_PULMD_UP: return PIN_CONFIG_BIAS_PULL_UP; case PORTCR_PULMD_DOWN: return PIN_CONFIG_BIAS_PULL_DOWN; case PORTCR_PULMD_OFF: default: return PIN_CONFIG_BIAS_DISABLE; } } static void r8a73a4_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin, unsigned int bias) { void __iomem *addr; u32 value; addr = pfc->windows->virt + r8a73a4_portcr_offsets[pin >> 5] + pin; value = ioread8(addr) & ~PORTCR_PULMD_MASK; switch (bias) { case PIN_CONFIG_BIAS_PULL_UP: value |= PORTCR_PULMD_UP; break; case PIN_CONFIG_BIAS_PULL_DOWN: value |= PORTCR_PULMD_DOWN; break; } iowrite8(value, addr); } static const struct sh_pfc_soc_operations r8a73a4_pfc_ops = { .get_bias = r8a73a4_pinmux_get_bias, .set_bias = r8a73a4_pinmux_set_bias, }; const struct sh_pfc_soc_info r8a73a4_pinmux_info = { .name = "r8a73a4_pfc", .ops = &r8a73a4_pfc_ops, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .pins = pinmux_pins, .nr_pins = ARRAY_SIZE(pinmux_pins), .groups = pinmux_groups, .nr_groups = ARRAY_SIZE(pinmux_groups), .functions = pinmux_functions, .nr_functions = ARRAY_SIZE(pinmux_functions), .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), .gpio_irq = pinmux_irqs, .gpio_irq_size = ARRAY_SIZE(pinmux_irqs), };
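Below is a small userspace sketch (not part of the driver) of how the PULMD field in each per-pin PORTCR byte encodes the bias that r8a73a4_pinmux_get_bias()/r8a73a4_pinmux_set_bias() above report and program. The ioremapped register window is faked with a plain array; only the bank-offset-plus-pin addressing from r8a73a4_portcr_offsets[] and the PULMD bit masks are taken from the code above, everything else is illustrative.

/* standalone sketch -- mirrors the PORTCR bias logic with a fake register array */
#include <stdio.h>
#include <stdint.h>

#define PULMD_MASK	(3 << 6)
#define PULMD_OFF	(0 << 6)
#define PULMD_DOWN	(2 << 6)
#define PULMD_UP	(3 << 6)

/* per-32-pin-bank offsets copied from r8a73a4_portcr_offsets[] */
static const unsigned int portcr_offsets[] = {
	0x0000, 0x1000, 0x0000, 0x1000, 0x1000, 0x2000,
	0x2000, 0x2000, 0x2000, 0x3000, 0x3000,
};

static uint8_t fake_regs[0x4000];	/* stand-in for the ioremapped window */

static unsigned int portcr_index(unsigned int pin)
{
	/* same addressing as the driver: bank offset plus pin number */
	return portcr_offsets[pin >> 5] + pin;
}

static void set_bias(unsigned int pin, unsigned int pulmd)
{
	unsigned int idx = portcr_index(pin);

	fake_regs[idx] = (fake_regs[idx] & ~PULMD_MASK) | pulmd;
}

static const char *get_bias(unsigned int pin)
{
	switch (fake_regs[portcr_index(pin)] & PULMD_MASK) {
	case PULMD_UP:		return "pull-up";
	case PULMD_DOWN:	return "pull-down";
	default:		return "bias-disable";
	}
}

int main(void)
{
	/* pin 301 is the SDHI0 card-detect pin in the tables above;
	 * its PORTCR index works out to 0x3000 + 301 = 0x312d, matching
	 * PORTCR(301, 0xe605312D) relative to the 0xe6050000 window. */
	set_bias(301, PULMD_UP);
	printf("pin 301: %s\n", get_bias(301));
	return 0;
}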
gpl-2.0
elevendroids/kernel-thalamus-custom
net/sched/sch_hfsc.c
763
41643
/* * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * 2003-10-17 - Ported from altq */ /* * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved. * * Permission to use, copy, modify, and distribute this software and * its documentation is hereby granted (including for commercial or * for-profit use), provided that both the copyright notice and this * permission notice appear in all copies of the software, derivative * works, or modified versions, and any portions thereof. * * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * * Carnegie Mellon encourages (but does not require) users of this * software to return any improvements or extensions that they make, * and to grant Carnegie Mellon the rights to redistribute these * changes without encumbrance. */ /* * H-FSC is described in Proceedings of SIGCOMM'97, * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing, * Real-Time and Priority Service" * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng. * * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing. * when a class has an upperlimit, the fit-time is computed from the * upperlimit service curve. the link-sharing scheduler does not schedule * a class whose fit-time exceeds the current time. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/rbtree.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/pkt_sched.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <asm/div64.h> /* * kernel internal service curve representation: * coordinates are given by 64 bit unsigned integers. * x-axis: unit is clock count. * y-axis: unit is byte. * * The service curve parameters are converted to the internal * representation. The slope values are scaled to avoid overflow. * the inverse slope values as well as the y-projection of the 1st * segment are kept in order to avoid 64-bit divide operations * that are expensive on 32-bit architectures. 
*/ struct internal_sc { u64 sm1; /* scaled slope of the 1st segment */ u64 ism1; /* scaled inverse-slope of the 1st segment */ u64 dx; /* the x-projection of the 1st segment */ u64 dy; /* the y-projection of the 1st segment */ u64 sm2; /* scaled slope of the 2nd segment */ u64 ism2; /* scaled inverse-slope of the 2nd segment */ }; /* runtime service curve */ struct runtime_sc { u64 x; /* current starting position on x-axis */ u64 y; /* current starting position on y-axis */ u64 sm1; /* scaled slope of the 1st segment */ u64 ism1; /* scaled inverse-slope of the 1st segment */ u64 dx; /* the x-projection of the 1st segment */ u64 dy; /* the y-projection of the 1st segment */ u64 sm2; /* scaled slope of the 2nd segment */ u64 ism2; /* scaled inverse-slope of the 2nd segment */ }; enum hfsc_class_flags { HFSC_RSC = 0x1, HFSC_FSC = 0x2, HFSC_USC = 0x4 }; struct hfsc_class { struct Qdisc_class_common cl_common; unsigned int refcnt; /* usage count */ struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; struct gnet_stats_rate_est rate_est; unsigned int level; /* class level in hierarchy */ struct tcf_proto *filter_list; /* filter list */ unsigned int filter_cnt; /* filter count */ struct hfsc_sched *sched; /* scheduler data */ struct hfsc_class *cl_parent; /* parent class */ struct list_head siblings; /* sibling classes */ struct list_head children; /* child classes */ struct Qdisc *qdisc; /* leaf qdisc */ struct rb_node el_node; /* qdisc's eligible tree member */ struct rb_root vt_tree; /* active children sorted by cl_vt */ struct rb_node vt_node; /* parent's vt_tree member */ struct rb_root cf_tree; /* active children sorted by cl_f */ struct rb_node cf_node; /* parent's cf_heap member */ struct list_head dlist; /* drop list member */ u64 cl_total; /* total work in bytes */ u64 cl_cumul; /* cumulative work in bytes done by real-time criteria */ u64 cl_d; /* deadline*/ u64 cl_e; /* eligible time */ u64 cl_vt; /* virtual time */ u64 cl_f; /* time when this class will fit for link-sharing, max(myf, cfmin) */ u64 cl_myf; /* my fit-time (calculated from this class's own upperlimit curve) */ u64 cl_myfadj; /* my fit-time adjustment (to cancel history dependence) */ u64 cl_cfmin; /* earliest children's fit-time (used with cl_myf to obtain cl_f) */ u64 cl_cvtmin; /* minimal virtual time among the children fit for link-sharing (monotonic within a period) */ u64 cl_vtadj; /* intra-period cumulative vt adjustment */ u64 cl_vtoff; /* inter-period cumulative vt offset */ u64 cl_cvtmax; /* max child's vt in the last period */ u64 cl_cvtoff; /* cumulative cvtmax of all periods */ u64 cl_pcvtoff; /* parent's cvtoff at initialization time */ struct internal_sc cl_rsc; /* internal real-time service curve */ struct internal_sc cl_fsc; /* internal fair service curve */ struct internal_sc cl_usc; /* internal upperlimit service curve */ struct runtime_sc cl_deadline; /* deadline curve */ struct runtime_sc cl_eligible; /* eligible curve */ struct runtime_sc cl_virtual; /* virtual curve */ struct runtime_sc cl_ulimit; /* upperlimit curve */ unsigned long cl_flags; /* which curves are valid */ unsigned long cl_vtperiod; /* vt period sequence number */ unsigned long cl_parentperiod;/* parent's vt period sequence number*/ unsigned long cl_nactive; /* number of active children */ }; struct hfsc_sched { u16 defcls; /* default class id */ struct hfsc_class root; /* root class */ struct Qdisc_class_hash clhash; /* class hash */ struct rb_root eligible; /* eligible tree */ struct list_head droplist; /* active 
leaf class list (for dropping) */ struct qdisc_watchdog watchdog; /* watchdog timer */ }; #define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */ /* * eligible tree holds backlogged classes being sorted by their eligible times. * there is one eligible tree per hfsc instance. */ static void eltree_insert(struct hfsc_class *cl) { struct rb_node **p = &cl->sched->eligible.rb_node; struct rb_node *parent = NULL; struct hfsc_class *cl1; while (*p != NULL) { parent = *p; cl1 = rb_entry(parent, struct hfsc_class, el_node); if (cl->cl_e >= cl1->cl_e) p = &parent->rb_right; else p = &parent->rb_left; } rb_link_node(&cl->el_node, parent, p); rb_insert_color(&cl->el_node, &cl->sched->eligible); } static inline void eltree_remove(struct hfsc_class *cl) { rb_erase(&cl->el_node, &cl->sched->eligible); } static inline void eltree_update(struct hfsc_class *cl) { eltree_remove(cl); eltree_insert(cl); } /* find the class with the minimum deadline among the eligible classes */ static inline struct hfsc_class * eltree_get_mindl(struct hfsc_sched *q, u64 cur_time) { struct hfsc_class *p, *cl = NULL; struct rb_node *n; for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) { p = rb_entry(n, struct hfsc_class, el_node); if (p->cl_e > cur_time) break; if (cl == NULL || p->cl_d < cl->cl_d) cl = p; } return cl; } /* find the class with minimum eligible time among the eligible classes */ static inline struct hfsc_class * eltree_get_minel(struct hfsc_sched *q) { struct rb_node *n; n = rb_first(&q->eligible); if (n == NULL) return NULL; return rb_entry(n, struct hfsc_class, el_node); } /* * vttree holds holds backlogged child classes being sorted by their virtual * time. each intermediate class has one vttree. */ static void vttree_insert(struct hfsc_class *cl) { struct rb_node **p = &cl->cl_parent->vt_tree.rb_node; struct rb_node *parent = NULL; struct hfsc_class *cl1; while (*p != NULL) { parent = *p; cl1 = rb_entry(parent, struct hfsc_class, vt_node); if (cl->cl_vt >= cl1->cl_vt) p = &parent->rb_right; else p = &parent->rb_left; } rb_link_node(&cl->vt_node, parent, p); rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree); } static inline void vttree_remove(struct hfsc_class *cl) { rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree); } static inline void vttree_update(struct hfsc_class *cl) { vttree_remove(cl); vttree_insert(cl); } static inline struct hfsc_class * vttree_firstfit(struct hfsc_class *cl, u64 cur_time) { struct hfsc_class *p; struct rb_node *n; for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) { p = rb_entry(n, struct hfsc_class, vt_node); if (p->cl_f <= cur_time) return p; } return NULL; } /* * get the leaf class with the minimum vt in the hierarchy */ static struct hfsc_class * vttree_get_minvt(struct hfsc_class *cl, u64 cur_time) { /* if root-class's cfmin is bigger than cur_time nothing to do */ if (cl->cl_cfmin > cur_time) return NULL; while (cl->level > 0) { cl = vttree_firstfit(cl, cur_time); if (cl == NULL) return NULL; /* * update parent's cl_cvtmin. 
*/ if (cl->cl_parent->cl_cvtmin < cl->cl_vt) cl->cl_parent->cl_cvtmin = cl->cl_vt; } return cl; } static void cftree_insert(struct hfsc_class *cl) { struct rb_node **p = &cl->cl_parent->cf_tree.rb_node; struct rb_node *parent = NULL; struct hfsc_class *cl1; while (*p != NULL) { parent = *p; cl1 = rb_entry(parent, struct hfsc_class, cf_node); if (cl->cl_f >= cl1->cl_f) p = &parent->rb_right; else p = &parent->rb_left; } rb_link_node(&cl->cf_node, parent, p); rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree); } static inline void cftree_remove(struct hfsc_class *cl) { rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree); } static inline void cftree_update(struct hfsc_class *cl) { cftree_remove(cl); cftree_insert(cl); } /* * service curve support functions * * external service curve parameters * m: bps * d: us * internal service curve parameters * sm: (bytes/psched_us) << SM_SHIFT * ism: (psched_us/byte) << ISM_SHIFT * dx: psched_us * * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us. * * sm and ism are scaled in order to keep effective digits. * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective * digits in decimal using the following table. * * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps * ------------+------------------------------------------------------- * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3 * * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125 * * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18. */ #define SM_SHIFT (30 - PSCHED_SHIFT) #define ISM_SHIFT (8 + PSCHED_SHIFT) #define SM_MASK ((1ULL << SM_SHIFT) - 1) #define ISM_MASK ((1ULL << ISM_SHIFT) - 1) static inline u64 seg_x2y(u64 x, u64 sm) { u64 y; /* * compute * y = x * sm >> SM_SHIFT * but divide it for the upper and lower bits to avoid overflow */ y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT); return y; } static inline u64 seg_y2x(u64 y, u64 ism) { u64 x; if (y == 0) x = 0; else if (ism == HT_INFINITY) x = HT_INFINITY; else { x = (y >> ISM_SHIFT) * ism + (((y & ISM_MASK) * ism) >> ISM_SHIFT); } return x; } /* Convert m (bps) into sm (bytes/psched us) */ static u64 m2sm(u32 m) { u64 sm; sm = ((u64)m << SM_SHIFT); sm += PSCHED_TICKS_PER_SEC - 1; do_div(sm, PSCHED_TICKS_PER_SEC); return sm; } /* convert m (bps) into ism (psched us/byte) */ static u64 m2ism(u32 m) { u64 ism; if (m == 0) ism = HT_INFINITY; else { ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT); ism += m - 1; do_div(ism, m); } return ism; } /* convert d (us) into dx (psched us) */ static u64 d2dx(u32 d) { u64 dx; dx = ((u64)d * PSCHED_TICKS_PER_SEC); dx += USEC_PER_SEC - 1; do_div(dx, USEC_PER_SEC); return dx; } /* convert sm (bytes/psched us) into m (bps) */ static u32 sm2m(u64 sm) { u64 m; m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT; return (u32)m; } /* convert dx (psched us) into d (us) */ static u32 dx2d(u64 dx) { u64 d; d = dx * USEC_PER_SEC; do_div(d, PSCHED_TICKS_PER_SEC); return (u32)d; } static void sc2isc(struct tc_service_curve *sc, struct internal_sc *isc) { isc->sm1 = m2sm(sc->m1); isc->ism1 = m2ism(sc->m1); isc->dx = d2dx(sc->d); isc->dy = seg_x2y(isc->dx, isc->sm1); isc->sm2 = m2sm(sc->m2); isc->ism2 = m2ism(sc->m2); } /* * initialize the runtime service curve with the given internal * service curve starting at (x, y). 
*/ static void rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y) { rtsc->x = x; rtsc->y = y; rtsc->sm1 = isc->sm1; rtsc->ism1 = isc->ism1; rtsc->dx = isc->dx; rtsc->dy = isc->dy; rtsc->sm2 = isc->sm2; rtsc->ism2 = isc->ism2; } /* * calculate the y-projection of the runtime service curve by the * given x-projection value */ static u64 rtsc_y2x(struct runtime_sc *rtsc, u64 y) { u64 x; if (y < rtsc->y) x = rtsc->x; else if (y <= rtsc->y + rtsc->dy) { /* x belongs to the 1st segment */ if (rtsc->dy == 0) x = rtsc->x + rtsc->dx; else x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1); } else { /* x belongs to the 2nd segment */ x = rtsc->x + rtsc->dx + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2); } return x; } static u64 rtsc_x2y(struct runtime_sc *rtsc, u64 x) { u64 y; if (x <= rtsc->x) y = rtsc->y; else if (x <= rtsc->x + rtsc->dx) /* y belongs to the 1st segment */ y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1); else /* y belongs to the 2nd segment */ y = rtsc->y + rtsc->dy + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2); return y; } /* * update the runtime service curve by taking the minimum of the current * runtime service curve and the service curve starting at (x, y). */ static void rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y) { u64 y1, y2, dx, dy; u32 dsm; if (isc->sm1 <= isc->sm2) { /* service curve is convex */ y1 = rtsc_x2y(rtsc, x); if (y1 < y) /* the current rtsc is smaller */ return; rtsc->x = x; rtsc->y = y; return; } /* * service curve is concave * compute the two y values of the current rtsc * y1: at x * y2: at (x + dx) */ y1 = rtsc_x2y(rtsc, x); if (y1 <= y) { /* rtsc is below isc, no change to rtsc */ return; } y2 = rtsc_x2y(rtsc, x + isc->dx); if (y2 >= y + isc->dy) { /* rtsc is above isc, replace rtsc by isc */ rtsc->x = x; rtsc->y = y; rtsc->dx = isc->dx; rtsc->dy = isc->dy; return; } /* * the two curves intersect * compute the offsets (dx, dy) using the reverse * function of seg_x2y() * seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y) */ dx = (y1 - y) << SM_SHIFT; dsm = isc->sm1 - isc->sm2; do_div(dx, dsm); /* * check if (x, y1) belongs to the 1st segment of rtsc. * if so, add the offset. */ if (rtsc->x + rtsc->dx > x) dx += rtsc->x + rtsc->dx - x; dy = seg_x2y(dx, isc->sm1); rtsc->x = x; rtsc->y = y; rtsc->dx = dx; rtsc->dy = dy; } static void init_ed(struct hfsc_class *cl, unsigned int next_len) { u64 cur_time = psched_get_time(); /* update the deadline curve */ rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul); /* * update the eligible curve. * for concave, it is equal to the deadline curve. * for convex, it is a linear curve with slope m2. 
*/ cl->cl_eligible = cl->cl_deadline; if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) { cl->cl_eligible.dx = 0; cl->cl_eligible.dy = 0; } /* compute e and d */ cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul); cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len); eltree_insert(cl); } static void update_ed(struct hfsc_class *cl, unsigned int next_len) { cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul); cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len); eltree_update(cl); } static inline void update_d(struct hfsc_class *cl, unsigned int next_len) { cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len); } static inline void update_cfmin(struct hfsc_class *cl) { struct rb_node *n = rb_first(&cl->cf_tree); struct hfsc_class *p; if (n == NULL) { cl->cl_cfmin = 0; return; } p = rb_entry(n, struct hfsc_class, cf_node); cl->cl_cfmin = p->cl_f; } static void init_vf(struct hfsc_class *cl, unsigned int len) { struct hfsc_class *max_cl; struct rb_node *n; u64 vt, f, cur_time; int go_active; cur_time = 0; go_active = 1; for (; cl->cl_parent != NULL; cl = cl->cl_parent) { if (go_active && cl->cl_nactive++ == 0) go_active = 1; else go_active = 0; if (go_active) { n = rb_last(&cl->cl_parent->vt_tree); if (n != NULL) { max_cl = rb_entry(n, struct hfsc_class,vt_node); /* * set vt to the average of the min and max * classes. if the parent's period didn't * change, don't decrease vt of the class. */ vt = max_cl->cl_vt; if (cl->cl_parent->cl_cvtmin != 0) vt = (cl->cl_parent->cl_cvtmin + vt)/2; if (cl->cl_parent->cl_vtperiod != cl->cl_parentperiod || vt > cl->cl_vt) cl->cl_vt = vt; } else { /* * first child for a new parent backlog period. * add parent's cvtmax to cvtoff to make a new * vt (vtoff + vt) larger than the vt in the * last period for all children. 
*/ vt = cl->cl_parent->cl_cvtmax; cl->cl_parent->cl_cvtoff += vt; cl->cl_parent->cl_cvtmax = 0; cl->cl_parent->cl_cvtmin = 0; cl->cl_vt = 0; } cl->cl_vtoff = cl->cl_parent->cl_cvtoff - cl->cl_pcvtoff; /* update the virtual curve */ vt = cl->cl_vt + cl->cl_vtoff; rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt, cl->cl_total); if (cl->cl_virtual.x == vt) { cl->cl_virtual.x -= cl->cl_vtoff; cl->cl_vtoff = 0; } cl->cl_vtadj = 0; cl->cl_vtperiod++; /* increment vt period */ cl->cl_parentperiod = cl->cl_parent->cl_vtperiod; if (cl->cl_parent->cl_nactive == 0) cl->cl_parentperiod++; cl->cl_f = 0; vttree_insert(cl); cftree_insert(cl); if (cl->cl_flags & HFSC_USC) { /* class has upper limit curve */ if (cur_time == 0) cur_time = psched_get_time(); /* update the ulimit curve */ rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total); /* compute myf */ cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total); cl->cl_myfadj = 0; } } f = max(cl->cl_myf, cl->cl_cfmin); if (f != cl->cl_f) { cl->cl_f = f; cftree_update(cl); update_cfmin(cl->cl_parent); } } } static void update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time) { u64 f; /* , myf_bound, delta; */ int go_passive = 0; if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC) go_passive = 1; for (; cl->cl_parent != NULL; cl = cl->cl_parent) { cl->cl_total += len; if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0) continue; if (go_passive && --cl->cl_nactive == 0) go_passive = 1; else go_passive = 0; if (go_passive) { /* no more active child, going passive */ /* update cvtmax of the parent class */ if (cl->cl_vt > cl->cl_parent->cl_cvtmax) cl->cl_parent->cl_cvtmax = cl->cl_vt; /* remove this class from the vt tree */ vttree_remove(cl); cftree_remove(cl); update_cfmin(cl->cl_parent); continue; } /* * update vt and f */ cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) - cl->cl_vtoff + cl->cl_vtadj; /* * if vt of the class is smaller than cvtmin, * the class was skipped in the past due to non-fit. * if so, we need to adjust vtadj. */ if (cl->cl_vt < cl->cl_parent->cl_cvtmin) { cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt; cl->cl_vt = cl->cl_parent->cl_cvtmin; } /* update the vt tree */ vttree_update(cl); if (cl->cl_flags & HFSC_USC) { cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit, cl->cl_total); #if 0 /* * This code causes classes to stay way under their * limit when multiple classes are used at gigabit * speed. needs investigation. -kaber */ /* * if myf lags behind by more than one clock tick * from the current time, adjust myfadj to prevent * a rate-limited class from going greedy. * in a steady state under rate-limiting, myf * fluctuates within one clock tick. */ myf_bound = cur_time - PSCHED_JIFFIE2US(1); if (cl->cl_myf < myf_bound) { delta = cur_time - cl->cl_myf; cl->cl_myfadj += delta; cl->cl_myf += delta; } #endif } f = max(cl->cl_myf, cl->cl_cfmin); if (f != cl->cl_f) { cl->cl_f = f; cftree_update(cl); update_cfmin(cl->cl_parent); } } } static void set_active(struct hfsc_class *cl, unsigned int len) { if (cl->cl_flags & HFSC_RSC) init_ed(cl, len); if (cl->cl_flags & HFSC_FSC) init_vf(cl, len); list_add_tail(&cl->dlist, &cl->sched->droplist); } static void set_passive(struct hfsc_class *cl) { if (cl->cl_flags & HFSC_RSC) eltree_remove(cl); list_del(&cl->dlist); /* * vttree is now handled in update_vf() so that update_vf(cl, 0, 0) * needs to be called explicitly to remove a class from vttree. 
*/ } static unsigned int qdisc_peek_len(struct Qdisc *sch) { struct sk_buff *skb; unsigned int len; skb = sch->ops->peek(sch); if (skb == NULL) { qdisc_warn_nonwc("qdisc_peek_len", sch); return 0; } len = qdisc_pkt_len(skb); return len; } static void hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl) { unsigned int len = cl->qdisc->q.qlen; qdisc_reset(cl->qdisc); qdisc_tree_decrease_qlen(cl->qdisc, len); } static void hfsc_adjust_levels(struct hfsc_class *cl) { struct hfsc_class *p; unsigned int level; do { level = 0; list_for_each_entry(p, &cl->children, siblings) { if (p->level >= level) level = p->level + 1; } cl->level = level; } while ((cl = cl->cl_parent) != NULL); } static inline struct hfsc_class * hfsc_find_class(u32 classid, struct Qdisc *sch) { struct hfsc_sched *q = qdisc_priv(sch); struct Qdisc_class_common *clc; clc = qdisc_class_find(&q->clhash, classid); if (clc == NULL) return NULL; return container_of(clc, struct hfsc_class, cl_common); } static void hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc, u64 cur_time) { sc2isc(rsc, &cl->cl_rsc); rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul); cl->cl_eligible = cl->cl_deadline; if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) { cl->cl_eligible.dx = 0; cl->cl_eligible.dy = 0; } cl->cl_flags |= HFSC_RSC; } static void hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc) { sc2isc(fsc, &cl->cl_fsc); rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total); cl->cl_flags |= HFSC_FSC; } static void hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc, u64 cur_time) { sc2isc(usc, &cl->cl_usc); rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total); cl->cl_flags |= HFSC_USC; } static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = { [TCA_HFSC_RSC] = { .len = sizeof(struct tc_service_curve) }, [TCA_HFSC_FSC] = { .len = sizeof(struct tc_service_curve) }, [TCA_HFSC_USC] = { .len = sizeof(struct tc_service_curve) }, }; static int hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { struct hfsc_sched *q = qdisc_priv(sch); struct hfsc_class *cl = (struct hfsc_class *)*arg; struct hfsc_class *parent = NULL; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_HFSC_MAX + 1]; struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL; u64 cur_time; int err; if (opt == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy); if (err < 0) return err; if (tb[TCA_HFSC_RSC]) { rsc = nla_data(tb[TCA_HFSC_RSC]); if (rsc->m1 == 0 && rsc->m2 == 0) rsc = NULL; } if (tb[TCA_HFSC_FSC]) { fsc = nla_data(tb[TCA_HFSC_FSC]); if (fsc->m1 == 0 && fsc->m2 == 0) fsc = NULL; } if (tb[TCA_HFSC_USC]) { usc = nla_data(tb[TCA_HFSC_USC]); if (usc->m1 == 0 && usc->m2 == 0) usc = NULL; } if (cl != NULL) { if (parentid) { if (cl->cl_parent && cl->cl_parent->cl_common.classid != parentid) return -EINVAL; if (cl->cl_parent == NULL && parentid != TC_H_ROOT) return -EINVAL; } cur_time = psched_get_time(); if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) return err; } sch_tree_lock(sch); if (rsc != NULL) hfsc_change_rsc(cl, rsc, cur_time); if (fsc != NULL) hfsc_change_fsc(cl, fsc); if (usc != NULL) hfsc_change_usc(cl, usc, cur_time); if (cl->qdisc->q.qlen != 0) { if (cl->cl_flags & HFSC_RSC) update_ed(cl, qdisc_peek_len(cl->qdisc)); if (cl->cl_flags & HFSC_FSC) update_vf(cl, 0, cur_time); } sch_tree_unlock(sch); return 
0; } if (parentid == TC_H_ROOT) return -EEXIST; parent = &q->root; if (parentid) { parent = hfsc_find_class(parentid, sch); if (parent == NULL) return -ENOENT; } if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0) return -EINVAL; if (hfsc_find_class(classid, sch)) return -EEXIST; if (rsc == NULL && fsc == NULL) return -EINVAL; cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL); if (cl == NULL) return -ENOBUFS; if (tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); if (err) { kfree(cl); return err; } } if (rsc != NULL) hfsc_change_rsc(cl, rsc, 0); if (fsc != NULL) hfsc_change_fsc(cl, fsc); if (usc != NULL) hfsc_change_usc(cl, usc, 0); cl->cl_common.classid = classid; cl->refcnt = 1; cl->sched = q; cl->cl_parent = parent; cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, &pfifo_qdisc_ops, classid); if (cl->qdisc == NULL) cl->qdisc = &noop_qdisc; INIT_LIST_HEAD(&cl->children); cl->vt_tree = RB_ROOT; cl->cf_tree = RB_ROOT; sch_tree_lock(sch); qdisc_class_hash_insert(&q->clhash, &cl->cl_common); list_add_tail(&cl->siblings, &parent->children); if (parent->level == 0) hfsc_purge_queue(sch, parent); hfsc_adjust_levels(parent); cl->cl_pcvtoff = parent->cl_cvtoff; sch_tree_unlock(sch); qdisc_class_hash_grow(sch, &q->clhash); *arg = (unsigned long)cl; return 0; } static void hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl) { struct hfsc_sched *q = qdisc_priv(sch); tcf_destroy_chain(&cl->filter_list); qdisc_destroy(cl->qdisc); gen_kill_estimator(&cl->bstats, &cl->rate_est); if (cl != &q->root) kfree(cl); } static int hfsc_delete_class(struct Qdisc *sch, unsigned long arg) { struct hfsc_sched *q = qdisc_priv(sch); struct hfsc_class *cl = (struct hfsc_class *)arg; if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root) return -EBUSY; sch_tree_lock(sch); list_del(&cl->siblings); hfsc_adjust_levels(cl->cl_parent); hfsc_purge_queue(sch, cl); qdisc_class_hash_remove(&q->clhash, &cl->cl_common); BUG_ON(--cl->refcnt == 0); /* * This shouldn't happen: we "hold" one cops->get() when called * from tc_ctl_tclass; the destroy method is done from cops->put(). 
*/ sch_tree_unlock(sch); return 0; } static struct hfsc_class * hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) { struct hfsc_sched *q = qdisc_priv(sch); struct hfsc_class *head, *cl; struct tcf_result res; struct tcf_proto *tcf; int result; if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 && (cl = hfsc_find_class(skb->priority, sch)) != NULL) if (cl->level == 0) return cl; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; head = &q->root; tcf = q->root.filter_list; while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; } #endif if ((cl = (struct hfsc_class *)res.class) == NULL) { if ((cl = hfsc_find_class(res.classid, sch)) == NULL) break; /* filter selected invalid classid */ if (cl->level >= head->level) break; /* filter may only point downwards */ } if (cl->level == 0) return cl; /* hit leaf class */ /* apply inner filter chain */ tcf = cl->filter_list; head = cl; } /* classification failed, try default class */ cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); if (cl == NULL || cl->level > 0) return NULL; return cl; } static int hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct hfsc_class *cl = (struct hfsc_class *)arg; if (cl->level > 0) return -EINVAL; if (new == NULL) { new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, &pfifo_qdisc_ops, cl->cl_common.classid); if (new == NULL) new = &noop_qdisc; } sch_tree_lock(sch); hfsc_purge_queue(sch, cl); *old = cl->qdisc; cl->qdisc = new; sch_tree_unlock(sch); return 0; } static struct Qdisc * hfsc_class_leaf(struct Qdisc *sch, unsigned long arg) { struct hfsc_class *cl = (struct hfsc_class *)arg; if (cl->level == 0) return cl->qdisc; return NULL; } static void hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg) { struct hfsc_class *cl = (struct hfsc_class *)arg; if (cl->qdisc->q.qlen == 0) { update_vf(cl, 0, 0); set_passive(cl); } } static unsigned long hfsc_get_class(struct Qdisc *sch, u32 classid) { struct hfsc_class *cl = hfsc_find_class(classid, sch); if (cl != NULL) cl->refcnt++; return (unsigned long)cl; } static void hfsc_put_class(struct Qdisc *sch, unsigned long arg) { struct hfsc_class *cl = (struct hfsc_class *)arg; if (--cl->refcnt == 0) hfsc_destroy_class(sch, cl); } static unsigned long hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid) { struct hfsc_class *p = (struct hfsc_class *)parent; struct hfsc_class *cl = hfsc_find_class(classid, sch); if (cl != NULL) { if (p != NULL && p->level <= cl->level) return 0; cl->filter_cnt++; } return (unsigned long)cl; } static void hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg) { struct hfsc_class *cl = (struct hfsc_class *)arg; cl->filter_cnt--; } static struct tcf_proto ** hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg) { struct hfsc_sched *q = qdisc_priv(sch); struct hfsc_class *cl = (struct hfsc_class *)arg; if (cl == NULL) cl = &q->root; return &cl->filter_list; } static int hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc) { struct tc_service_curve tsc; tsc.m1 = sm2m(sc->sm1); tsc.d = dx2d(sc->dx); tsc.m2 = sm2m(sc->sm2); NLA_PUT(skb, attr, sizeof(tsc), &tsc); return skb->len; nla_put_failure: return -1; } static inline int hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl) { if ((cl->cl_flags & HFSC_RSC) && (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0)) goto 
nla_put_failure;
	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;
	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n;
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);

	q->root.cl_common.classid = sch->handle;
	q->root.refcnt = 1;
	q->root.sched  = q;
	q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					  &pfifo_qdisc_ops, sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	int uninitialized_var(err);

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, qdisc_pkt_len(skb));

	cl->bstats.packets++;
	cl->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}

static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);
gpl-2.0
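The sch_hfsc.c fragment above closes with the standard qdisc module pattern: a struct Qdisc_ops is filled in and handed to register_qdisc() at module init and unregister_qdisc() at module exit. The sketch below illustrates only that pattern with a trivial FIFO-style qdisc; it is not code from any repository in this dump, it assumes the same kernel era and helpers used by sch_hfsc.c above, and every name prefixed example_ is hypothetical.

/*
 * Minimal sketch of the register_qdisc()/unregister_qdisc() pattern,
 * assuming the qdisc_enqueue_tail()/qdisc_dequeue_head() helpers from
 * the same kernel generation as the sch_hfsc.c shown above.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* no classification, no limit check: append to the built-in queue */
	return qdisc_enqueue_tail(skb, sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.priv_size	= 0,
	.enqueue	= example_enqueue,
	.dequeue	= qdisc_dequeue_head,
	.peek		= qdisc_peek_head,
	.drop		= qdisc_queue_drop,
	.reset		= qdisc_reset_queue,
	.owner		= THIS_MODULE,
};

static int __init example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");

Unlike hfsc_qdisc_ops above, this sketch leaves .cl_ops unset, so it is classless; a classful qdisc such as HFSC additionally wires a Qdisc_class_ops table for class creation, grafting, dumping, and filter binding.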
cameron581/kernel
arch/arm/mach-msm/msm_memory_dump.c
2043
2072
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <asm/cacheflush.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/export.h>
#include <mach/msm_iomap.h>
#include <mach/msm_memory_dump.h>

/*TODO: Needs to be set to correct value */
#define DUMP_TABLE_OFFSET	0x14

#define MSM_DUMP_TABLE_VERSION	MK_TABLE(1, 0)

static struct msm_memory_dump mem_dump_data;

int msm_dump_table_register(struct msm_client_dump *client_entry)
{
	struct msm_client_dump *entry;
	struct msm_dump_table *table = mem_dump_data.dump_table_ptr;

	if (!table || table->num_entries >= MAX_NUM_CLIENTS)
		return -EINVAL;
	entry = &table->client_entries[table->num_entries];
	entry->id = client_entry->id;
	entry->start_addr = client_entry->start_addr;
	entry->end_addr = client_entry->end_addr;
	table->num_entries++;
	/* flush cache */
	dmac_flush_range(table, table + sizeof(struct msm_dump_table));
	return 0;
}
EXPORT_SYMBOL(msm_dump_table_register);

static int __init init_memory_dump(void)
{
	struct msm_dump_table *table;

	mem_dump_data.dump_table_ptr = kzalloc(sizeof(struct msm_dump_table),
					       GFP_KERNEL);
	if (!mem_dump_data.dump_table_ptr) {
		printk(KERN_ERR "unable to allocate memory for dump table\n");
		return -ENOMEM;
	}
	table = mem_dump_data.dump_table_ptr;
	table->version = MSM_DUMP_TABLE_VERSION;
	mem_dump_data.dump_table_phys = virt_to_phys(table);
	writel_relaxed(mem_dump_data.dump_table_phys,
		       MSM_IMEM_BASE + DUMP_TABLE_OFFSET);
	printk(KERN_INFO "MSM Memory Dump table set up\n");
	return 0;
}

early_initcall(init_memory_dump);
gpl-2.0
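msm_dump_table_register() above copies a caller-supplied struct msm_client_dump (id, start_addr, end_addr) into the shared in-memory dump table and flushes it so debug tooling can read it after a reset. The sketch below shows what a client registration could look like; it is an illustration only, not code from this repository: the 4 KB size, the id value of 0, and every name prefixed example_ are hypothetical placeholders, and a real client id would come from the enum in mach/msm_memory_dump.h.

/*
 * Hypothetical dump-table client, assuming the msm_client_dump layout
 * visible in the msm_memory_dump.c shown above.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <mach/msm_memory_dump.h>

#define EXAMPLE_DUMP_SIZE	4096	/* hypothetical region size */

static struct msm_client_dump example_dump_entry;

static int __init example_dump_client_init(void)
{
	void *buf;

	/* buffer whose contents should survive into the post-reset dump */
	buf = kzalloc(EXAMPLE_DUMP_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* fill in the three fields msm_dump_table_register() copies */
	example_dump_entry.id = 0;	/* hypothetical id from the header's enum */
	example_dump_entry.start_addr = virt_to_phys(buf);
	example_dump_entry.end_addr = example_dump_entry.start_addr +
				      EXAMPLE_DUMP_SIZE;

	return msm_dump_table_register(&example_dump_entry);
}
late_initcall(example_dump_client_init);

Note that the table only records the physical address range; the client must keep the buffer allocated for as long as the entry is expected to be dumped.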
NebulaOy/linux
arch/arm/mach-omap2/emu.c
3067
1300
/*
 * emu.c
 *
 * ETM and ETB CoreSight components' resources as found in OMAP3xxx.
 *
 * Copyright (C) 2009 Nokia Corporation.
 * Alexander Shishkin
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>

#include "soc.h"
#include "iomap.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shishkin");

/* Cortex CoreSight components within omap3xxx EMU */
#define ETM_BASE	(L4_EMU_34XX_PHYS + 0x10000)
#define DBG_BASE	(L4_EMU_34XX_PHYS + 0x11000)
#define ETB_BASE	(L4_EMU_34XX_PHYS + 0x1b000)
#define DAPCTL		(L4_EMU_34XX_PHYS + 0x1d000)

static AMBA_APB_DEVICE(omap3_etb, "etb", 0x000bb907, ETB_BASE, { }, NULL);
static AMBA_APB_DEVICE(omap3_etm, "etm", 0x102bb921, ETM_BASE, { }, NULL);

static int __init emu_init(void)
{
	if (!cpu_is_omap34xx())
		return -ENODEV;

	amba_device_register(&omap3_etb_device, &iomem_resource);
	amba_device_register(&omap3_etm_device, &iomem_resource);

	return 0;
}

omap_subsys_initcall(emu_init);
gpl-2.0